comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
``` if (locationIp == null) { } else { .. } ``` ?
private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { 
telemetryBuilder.setResponseCode("0"); } String locationIp = getStableAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); }
if (locationIp != null) {
private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { 
telemetryBuilder.setResponseCode("0"); } String locationIp = getStableOrOldAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder() .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int defaultPort, String 
defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; 
if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case 
SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) { Long statusCode = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { 
String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext 
= span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return 
attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder(SPAN) .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableOrOldAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int 
defaultPort, String defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = 
attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case 
SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) { Long statusCode = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable 
private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || 
event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableOrOldAttribute(Attributes attributes, AttributeKey<T> stable, 
AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is 
incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
``` if (host == null) { ... } else { ..} ``` ?
private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; }
Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT);
private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder() .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int defaultPort, String 
defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = 
attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); 
telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { 
telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) { Long statusCode = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == 
null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = 
span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { 
      ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create();
    telemetryInitializer.accept(telemetryBuilder, span.getResource());
    setOperationId(telemetryBuilder, span.getTraceId());
    // The exception telemetry is parented to the span on which it was recorded.
    setOperationParentId(telemetryBuilder, span.getSpanId());
    if (operationName != null) {
      setOperationName(telemetryBuilder, operationName);
    } else {
      setOperationName(telemetryBuilder, span.getAttributes());
    }
    setTime(telemetryBuilder, span.getEndEpochNanos());
    setItemCount(telemetryBuilder, itemCount);
    MAPPINGS.map(span.getAttributes(), telemetryBuilder);
    telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack));
    return telemetryBuilder.build();
  }

  // Reads the stable-semconv attribute first, falling back to the old-semconv key.
  public static <T> T getStableAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) {
    T value = attributes.get(stable);
    if (value != null) {
      return value;
    }
    return attributes.get(old);
  }

  private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) {
    telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos));
  }

  // Encodes the item count as a sample-rate percentage (100/n); a count of 1 keeps the
  // default. NOTE(review): itemCount presumably reflects spans represented after
  // sampling — confirm against the ITEM_COUNT attribute's producer.
  private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) {
    if (itemCount != 1) {
      telemetryBuilder.setSampleRate(100.0f / itemCount);
    }
  }

  private static long getItemCount(SpanData span) {
    Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT);
    return itemCount == null ?
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder(SPAN) .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
      telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor;
  private final BiPredicate<SpanData, EventData> shouldSuppress;

  public SpanDataMapper(
      boolean captureHttpServer4xxAsError,
      BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer,
      BiPredicate<EventData, String> eventSuppressor,
      BiPredicate<SpanData, EventData> shouldSuppress) {
    this.captureHttpServer4xxAsError = captureHttpServer4xxAsError;
    this.telemetryInitializer = telemetryInitializer;
    this.eventSuppressor = eventSuppressor;
    this.shouldSuppress = shouldSuppress;
  }

  // Maps a span to a single telemetry item (request or remote dependency).
  public TelemetryItem map(SpanData span) {
    long itemCount = getItemCount(span);
    return map(span, itemCount);
  }

  // Maps a span and its events: the mapped span is emitted first, then one telemetry
  // item per non-suppressed span event (see exportEvents).
  public void map(SpanData span, Consumer<TelemetryItem> consumer) {
    long itemCount = getItemCount(span);
    TelemetryItem telemetryItem = map(span, itemCount);
    consumer.accept(telemetryItem);
    exportEvents(
        span,
        telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()),
        itemCount,
        consumer);
  }

  public TelemetryItem map(SpanData span, long itemCount) {
    if (RequestChecker.isRequest(span)) {
      return exportRequest(span, itemCount);
    } else {
      // INTERNAL spans are exported as in-process ("InProc") dependencies.
      return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount);
    }
  }

  private static boolean checkIsPreAggregatedStandardMetric(SpanData span) {
    Boolean isPreAggregatedStandardMetric =
        span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED);
    return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric;
  }

  private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) {
    RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create();
    telemetryInitializer.accept(telemetryBuilder, span.getResource());
    setOperationTags(telemetryBuilder, span);
    setTime(telemetryBuilder, span.getStartEpochNanos());
    setItemCount(telemetryBuilder, itemCount);
    MAPPINGS.map(span.getAttributes(), telemetryBuilder);
    addLinks(telemetryBuilder, span.getLinks());
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableOrOldAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int 
defaultPort, String defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName 
= attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); 
telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableOrOldAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { 
telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) { Long statusCode = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == 
null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = 
span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { 
ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableOrOldAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
Perhaps the addition of a `getNotNullAttribute(AttributeKey... attributeKeys)` method returning the the first non-null attribute could simplify the code in several places. This method may belong to the `SemanticAttributes` class.
private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": }
Long port = attributes.get(SemanticAttributes.SERVER_PORT);
private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder() .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int defaultPort, String 
defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; 
if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case 
SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = 
attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) 
{ Long statusCode = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { 
return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, 
itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder(SPAN) .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableOrOldAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int 
defaultPort, String defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = 
attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case 
SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) 
{ httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableOrOldAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if 
(captureHttpServer4xxAsError) { Long statusCode = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), 
attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } 
setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableOrOldAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
this isn't the same, since `locationIp` may or may not be assigned in the if condition
private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { 
telemetryBuilder.setResponseCode("0"); } String locationIp = getStableAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); }
if (locationIp != null) {
private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { 
telemetryBuilder.setResponseCode("0"); } String locationIp = getStableOrOldAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder() .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int defaultPort, String 
defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; 
if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case 
SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) { Long statusCode = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { 
String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext 
= span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return 
attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder(SPAN) .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableOrOldAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int 
defaultPort, String defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = 
attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case 
SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) { Long statusCode = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable 
private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || 
event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableOrOldAttribute(Attributes attributes, AttributeKey<T> stable, 
AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is 
incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
same as above (if I understand your suggestion correctly)
// Resolves the dependency target using the pre-stable ("old") semantic conventions.
// Precedence mirrors the stable-semconv resolver: an explicit peer.service wins,
// then the logical peer (net.peer.name/port), then the socket-level peer
// (net.sock.peer.name or net.sock.peer.addr, with net.sock.peer.port), and
// finally the host parsed out of http.url. Returns null when none are present.
private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) {
  String peerService = attributes.get(SemanticAttributes.PEER_SERVICE);
  if (peerService != null) {
    return peerService;
  }
  String peerName = attributes.get(SemanticAttributes.NET_PEER_NAME);
  if (peerName != null) {
    return getTarget(peerName, attributes.get(SemanticAttributes.NET_PEER_PORT), defaultPort);
  }
  String sockHost = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME);
  if (sockHost == null) {
    // no socket peer name captured; fall back to the raw socket peer address
    sockHost = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR);
  }
  if (sockHost != null) {
    return getTarget(
        sockHost, attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT), defaultPort);
  }
  String httpUrl = attributes.get(SemanticAttributes.HTTP_URL);
  return httpUrl == null ? null : UrlParser.getTarget(httpUrl);
}
Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT);
// Old (pre-stable) semconv fallback for resolving a dependency target.
// Order of precedence: peer.service, then net.peer.name (+ net.peer.port),
// then net.sock.peer.name or net.sock.peer.addr (+ net.sock.peer.port),
// and finally the host extracted from http.url; returns null if none apply.
private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) {
  String peerService = attributes.get(SemanticAttributes.PEER_SERVICE);
  if (peerService != null) {
    // an explicit peer.service always wins over host/port derivation
    return peerService;
  }
  String host = attributes.get(SemanticAttributes.NET_PEER_NAME);
  if (host != null) {
    Long port = attributes.get(SemanticAttributes.NET_PEER_PORT);
    // getTarget() suppresses the port suffix when it equals defaultPort
    return getTarget(host, port, defaultPort);
  }
  host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME);
  if (host == null) {
    // no socket peer name; fall back to the socket peer address
    host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR);
  }
  if (host != null) {
    Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT);
    return getTarget(host, port, defaultPort);
  }
  String httpUrl = attributes.get(SemanticAttributes.HTTP_URL);
  if (httpUrl != null) {
    // last resort: derive the target from the full request URL
    return UrlParser.getTarget(httpUrl);
  }
  return null;
}
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder() .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int defaultPort, String 
defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = 
attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); 
telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { 
telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) { Long statusCode = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == 
null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = 
span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { 
ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder(SPAN) .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableOrOldAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int 
defaultPort, String defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName 
= attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); 
telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableOrOldAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { 
telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) { Long statusCode = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == 
null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = 
span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { 
ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableOrOldAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
It's not clear to me how this would work here. Could you make a full GitHub "suggestion" (with the complete method body) so I can understand the proposal better?
private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": }
Long port = attributes.get(SemanticAttributes.SERVER_PORT);
private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder() .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int defaultPort, String 
defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; 
if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case 
SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = 
attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) 
{ Long statusCode = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { 
return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, 
itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder(SPAN) .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableOrOldAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int 
defaultPort, String defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = 
attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case 
SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) 
{ httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableOrOldAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if 
(captureHttpServer4xxAsError) { Long statusCode = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), 
attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } 
setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableOrOldAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
Sorry, I had read the code too fast. The logic is quite different between the methods.
private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": }
Long port = attributes.get(SemanticAttributes.SERVER_PORT);
private static String getHttpUrlFromServerSpanStableSemconv(Attributes attributes) { String scheme = attributes.get(SemanticAttributes.URL_SCHEME); if (scheme == null) { return null; } String path = attributes.get(SemanticAttributes.URL_PATH); if (path == null) { return null; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host == null) { return null; } Long port = attributes.get(SemanticAttributes.SERVER_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder() .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int defaultPort, String 
defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = attributes.get(SemanticAttributes.DB_OPERATION); } String type; 
if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case SemanticAttributes.DbSystemValues.H2: return 8082; case 
SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) { httpStatusCode = 
attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if (captureHttpServer4xxAsError) 
{ Long statusCode = getStableAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { 
return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, 
itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
class SpanDataMapper { public static final String MS_PROCESSED_BY_METRIC_EXTRACTORS = "_MS.ProcessedByMetricExtractors"; private static final Set<String> SQL_DB_SYSTEMS = new HashSet<>( asList( SemanticAttributes.DbSystemValues.DB2, SemanticAttributes.DbSystemValues.DERBY, SemanticAttributes.DbSystemValues.MARIADB, SemanticAttributes.DbSystemValues.MSSQL, SemanticAttributes.DbSystemValues.MYSQL, SemanticAttributes.DbSystemValues.ORACLE, SemanticAttributes.DbSystemValues.POSTGRESQL, SemanticAttributes.DbSystemValues.SQLITE, SemanticAttributes.DbSystemValues.OTHER_SQL, SemanticAttributes.DbSystemValues.HSQLDB, SemanticAttributes.DbSystemValues.H2)); private static final String COSMOS = "Cosmos"; private static final Mappings MAPPINGS; private static final ContextTagKeys AI_DEVICE_OS = ContextTagKeys.fromString("ai.device.os"); static { MappingsBuilder mappingsBuilder = new MappingsBuilder(SPAN) .ignoreExact(AiSemanticAttributes.AZURE_SDK_NAMESPACE.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION.getKey()) .ignoreExact(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS.getKey()) .ignoreExact(AiSemanticAttributes.KAFKA_OFFSET.getKey()) .exact( SemanticAttributes.USER_AGENT_ORIGINAL.getKey(), (builder, value) -> { if (value instanceof String) { builder.addTag("ai.user.userAgent", (String) value); } }) .ignorePrefix("applicationinsights.internal.") .prefix( "http.request.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }) .prefix( "http.response.header.", (telemetryBuilder, key, value) -> { if (value instanceof List) { telemetryBuilder.addProperty(key, Mappings.join((List<?>) value)); } }); applyCommonTags(mappingsBuilder); MAPPINGS = mappingsBuilder.build(); } private final boolean captureHttpServer4xxAsError; private final BiConsumer<AbstractTelemetryBuilder, Resource> 
telemetryInitializer; private final BiPredicate<EventData, String> eventSuppressor; private final BiPredicate<SpanData, EventData> shouldSuppress; public SpanDataMapper( boolean captureHttpServer4xxAsError, BiConsumer<AbstractTelemetryBuilder, Resource> telemetryInitializer, BiPredicate<EventData, String> eventSuppressor, BiPredicate<SpanData, EventData> shouldSuppress) { this.captureHttpServer4xxAsError = captureHttpServer4xxAsError; this.telemetryInitializer = telemetryInitializer; this.eventSuppressor = eventSuppressor; this.shouldSuppress = shouldSuppress; } public TelemetryItem map(SpanData span) { long itemCount = getItemCount(span); return map(span, itemCount); } public void map(SpanData span, Consumer<TelemetryItem> consumer) { long itemCount = getItemCount(span); TelemetryItem telemetryItem = map(span, itemCount); consumer.accept(telemetryItem); exportEvents( span, telemetryItem.getTags().get(ContextTagKeys.AI_OPERATION_NAME.toString()), itemCount, consumer); } public TelemetryItem map(SpanData span, long itemCount) { if (RequestChecker.isRequest(span)) { return exportRequest(span, itemCount); } else { return exportRemoteDependency(span, span.getKind() == SpanKind.INTERNAL, itemCount); } } private static boolean checkIsPreAggregatedStandardMetric(SpanData span) { Boolean isPreAggregatedStandardMetric = span.getAttributes().get(AiSemanticAttributes.IS_PRE_AGGREGATED); return isPreAggregatedStandardMetric != null && isPreAggregatedStandardMetric; } private TelemetryItem exportRemoteDependency(SpanData span, boolean inProc, long itemCount) { RemoteDependencyTelemetryBuilder telemetryBuilder = RemoteDependencyTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationTags(telemetryBuilder, span); setTime(telemetryBuilder, span.getStartEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); 
telemetryBuilder.setId(span.getSpanId()); telemetryBuilder.setName(getDependencyName(span)); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - span.getStartEpochNanos())); telemetryBuilder.setSuccess(getSuccess(span)); if (inProc) { telemetryBuilder.setType("InProc"); } else { applySemanticConventions(telemetryBuilder, span); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } return telemetryBuilder.build(); } private static final Set<String> DEFAULT_HTTP_SPAN_NAMES = new HashSet<>( asList("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT", "PATCH")); private static String getDependencyName(SpanData span) { String name = span.getName(); String method = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (method == null) { return name; } if (!DEFAULT_HTTP_SPAN_NAMES.contains(name)) { return name; } String url = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); if (url == null) { return name; } String path = UrlParser.getPath(url); if (path == null) { return name; } return path.isEmpty() ? 
method + " /" : method + " " + path; } private static void applySemanticConventions( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanData span) { Attributes attributes = span.getAttributes(); String httpMethod = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_REQUEST_METHOD, SemanticAttributes.HTTP_METHOD); if (httpMethod != null) { applyHttpClientSpan(telemetryBuilder, attributes); return; } String rpcSystem = attributes.get(SemanticAttributes.RPC_SYSTEM); if (rpcSystem != null) { applyRpcClientSpan(telemetryBuilder, rpcSystem, attributes); return; } String dbSystem = attributes.get(SemanticAttributes.DB_SYSTEM); if (dbSystem == null) { dbSystem = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_TYPE); } if (dbSystem != null) { applyDatabaseClientSpan(telemetryBuilder, dbSystem, attributes); return; } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem != null) { applyMessagingClientSpan(telemetryBuilder, span.getKind(), messagingSystem, attributes); return; } String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, null); if (target != null) { telemetryBuilder.setTarget(target); return; } telemetryBuilder.setType("InProc"); } @Nullable private static String getMessagingSystem(Attributes attributes) { String azureNamespace = attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE); if (isAzureSdkMessaging(azureNamespace)) { return azureNamespace; } return attributes.get(SemanticAttributes.MESSAGING_SYSTEM); } private static void setOperationTags(AbstractTelemetryBuilder telemetryBuilder, SpanData span) { setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getParentSpanContext().getSpanId()); setOperationName(telemetryBuilder, span.getAttributes()); } private static void setOperationId(AbstractTelemetryBuilder telemetryBuilder, String traceId) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), traceId); } private static void setOperationParentId( 
AbstractTelemetryBuilder telemetryBuilder, String parentSpanId) { if (SpanId.isValid(parentSpanId)) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), parentSpanId); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, Attributes attributes) { String operationName = attributes.get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } } private static void setOperationName( AbstractTelemetryBuilder telemetryBuilder, String operationName) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); } private static void applyHttpClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, Attributes attributes) { String httpUrl = getStableOrOldAttribute(attributes, SemanticAttributes.URL_FULL, SemanticAttributes.HTTP_URL); int defaultPort = getDefaultPortForHttpUrl(httpUrl); String target = getTargetOrDefault(attributes, defaultPort, "Http"); telemetryBuilder.setType("Http"); telemetryBuilder.setTarget(target); Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode != null) { telemetryBuilder.setResultCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResultCode("0"); } telemetryBuilder.setData(httpUrl); } private static void applyRpcClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String rpcSystem, Attributes attributes) { telemetryBuilder.setType(rpcSystem); String target = getTargetOrDefault(attributes, Integer.MAX_VALUE, rpcSystem); telemetryBuilder.setTarget(target); } private static int getDefaultPortForHttpUrl(@Nullable String httpUrl) { if (httpUrl == null) { return Integer.MAX_VALUE; } if (httpUrl.startsWith("https: return 443; } if (httpUrl.startsWith("http: return 80; } return Integer.MAX_VALUE; } public static String getTargetOrDefault( Attributes attributes, int 
defaultPort, String defaultTarget) { String target = getTargetOrNullStableSemconv(attributes, defaultPort); if (target != null) { return target; } target = getTargetOrNullOldSemconv(attributes, defaultPort); if (target != null) { return target; } return defaultTarget; } @Nullable private static String getTargetOrNullStableSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.SERVER_ADDRESS); if (host != null) { Long port = attributes.get(SemanticAttributes.SERVER_PORT); return getTarget(host, port, defaultPort); } return null; } @Nullable private static String getTargetOrNullOldSemconv(Attributes attributes, int defaultPort) { String peerService = attributes.get(SemanticAttributes.PEER_SERVICE); if (peerService != null) { return peerService; } String host = attributes.get(SemanticAttributes.NET_PEER_NAME); if (host != null) { Long port = attributes.get(SemanticAttributes.NET_PEER_PORT); return getTarget(host, port, defaultPort); } host = attributes.get(SemanticAttributes.NET_SOCK_PEER_NAME); if (host == null) { host = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (host != null) { Long port = attributes.get(SemanticAttributes.NET_SOCK_PEER_PORT); return getTarget(host, port, defaultPort); } String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return UrlParser.getTarget(httpUrl); } return null; } private static String getTarget(String host, @Nullable Long port, int defaultPort) { if (port != null && port != defaultPort) { return host + ":" + port; } else { return host; } } private static void applyDatabaseClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, String dbSystem, Attributes attributes) { String dbStatement = attributes.get(SemanticAttributes.DB_STATEMENT); if (dbStatement == null) { dbStatement = 
attributes.get(SemanticAttributes.DB_OPERATION); } String type; if (SQL_DB_SYSTEMS.contains(dbSystem)) { if (dbSystem.equals(SemanticAttributes.DbSystemValues.MYSQL)) { type = "mysql"; } else if (dbSystem.equals(SemanticAttributes.DbSystemValues.POSTGRESQL)) { type = "postgresql"; } else { type = "SQL"; } } else if (dbSystem.equals(COSMOS)) { type = "Microsoft.DocumentDb"; } else { type = dbSystem; } telemetryBuilder.setType(type); telemetryBuilder.setData(dbStatement); String target; String dbName; if (dbSystem.equals(COSMOS)) { String dbUrl = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_URL); if (dbUrl != null) { target = UrlParser.getTarget(dbUrl); } else { target = null; } dbName = attributes.get(AiSemanticAttributes.AZURE_SDK_DB_INSTANCE); } else { target = getTargetOrDefault(attributes, getDefaultPortForDbSystem(dbSystem), dbSystem); dbName = attributes.get(SemanticAttributes.DB_NAME); } target = nullAwareConcat(target, dbName, " | "); if (target == null) { target = dbSystem; } telemetryBuilder.setTarget(target); } private static void applyMessagingClientSpan( RemoteDependencyTelemetryBuilder telemetryBuilder, SpanKind spanKind, String messagingSystem, Attributes attributes) { if (spanKind == SpanKind.PRODUCER) { telemetryBuilder.setType("Queue Message | " + messagingSystem); } else { telemetryBuilder.setType(messagingSystem); } telemetryBuilder.setTarget(getMessagingTargetSource(attributes)); } private static int getDefaultPortForDbSystem(String dbSystem) { switch (dbSystem) { case SemanticAttributes.DbSystemValues.MONGODB: return 27017; case SemanticAttributes.DbSystemValues.CASSANDRA: return 9042; case SemanticAttributes.DbSystemValues.REDIS: return 6379; case SemanticAttributes.DbSystemValues.MARIADB: case SemanticAttributes.DbSystemValues.MYSQL: return 3306; case SemanticAttributes.DbSystemValues.MSSQL: return 1433; case SemanticAttributes.DbSystemValues.DB2: return 50000; case SemanticAttributes.DbSystemValues.ORACLE: return 1521; case 
SemanticAttributes.DbSystemValues.H2: return 8082; case SemanticAttributes.DbSystemValues.DERBY: return 1527; case SemanticAttributes.DbSystemValues.POSTGRESQL: return 5432; default: return Integer.MAX_VALUE; } } private TelemetryItem exportRequest(SpanData span, long itemCount) { RequestTelemetryBuilder telemetryBuilder = RequestTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); Attributes attributes = span.getAttributes(); long startEpochNanos = span.getStartEpochNanos(); telemetryBuilder.setId(span.getSpanId()); setTime(telemetryBuilder, startEpochNanos); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(attributes, telemetryBuilder); addLinks(telemetryBuilder, span.getLinks()); String operationName = getOperationName(span); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_NAME.toString(), operationName); telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_ID.toString(), span.getTraceId()); String aiLegacyParentId = span.getAttributes().get(AiSemanticAttributes.LEGACY_PARENT_ID); if (aiLegacyParentId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), aiLegacyParentId); } else if (span.getParentSpanContext().isValid()) { telemetryBuilder.addTag( ContextTagKeys.AI_OPERATION_PARENT_ID.toString(), span.getParentSpanContext().getSpanId()); } String aiLegacyRootId = span.getAttributes().get(AiSemanticAttributes.LEGACY_ROOT_ID); if (aiLegacyRootId != null) { telemetryBuilder.addTag("ai_legacyRootID", aiLegacyRootId); } telemetryBuilder.setName(operationName); telemetryBuilder.setDuration( FormattedDuration.fromNanos(span.getEndEpochNanos() - startEpochNanos)); telemetryBuilder.setSuccess(getSuccess(span)); String httpUrl = getHttpUrlFromServerSpan(attributes); if (httpUrl != null) { telemetryBuilder.setUrl(httpUrl); } Long httpStatusCode = getStableOrOldAttribute(attributes, SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); if (httpStatusCode == null) 
{ httpStatusCode = attributes.get(SemanticAttributes.RPC_GRPC_STATUS_CODE); } if (httpStatusCode != null) { telemetryBuilder.setResponseCode(Long.toString(httpStatusCode)); } else { telemetryBuilder.setResponseCode("0"); } String locationIp = getStableOrOldAttribute(attributes, SemanticAttributes.CLIENT_ADDRESS, SemanticAttributes.HTTP_CLIENT_IP); if (locationIp == null) { locationIp = attributes.get(SemanticAttributes.NET_SOCK_PEER_ADDR); } if (locationIp != null) { telemetryBuilder.addTag(ContextTagKeys.AI_LOCATION_IP.toString(), locationIp); } telemetryBuilder.setSource(getSource(attributes)); String sessionId = attributes.get(AiSemanticAttributes.SESSION_ID); if (sessionId != null) { telemetryBuilder.addTag(ContextTagKeys.AI_SESSION_ID.toString(), sessionId); } String deviceOs = attributes.get(AiSemanticAttributes.DEVICE_OS); if (deviceOs != null) { telemetryBuilder.addTag(AI_DEVICE_OS.toString(), deviceOs); } String deviceOsVersion = attributes.get(AiSemanticAttributes.DEVICE_OS_VERSION); if (deviceOsVersion != null) { telemetryBuilder.addTag(ContextTagKeys.AI_DEVICE_OS_VERSION.toString(), deviceOsVersion); } if (checkIsPreAggregatedStandardMetric(span)) { telemetryBuilder.addProperty(MS_PROCESSED_BY_METRIC_EXTRACTORS, "True"); } Long enqueuedTime = attributes.get(AiSemanticAttributes.AZURE_SDK_ENQUEUED_TIME); if (enqueuedTime != null) { long timeSinceEnqueuedMillis = Math.max( 0L, NANOSECONDS.toMillis(span.getStartEpochNanos()) - SECONDS.toMillis(enqueuedTime)); telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } Long timeSinceEnqueuedMillis = attributes.get(AiSemanticAttributes.KAFKA_RECORD_QUEUE_TIME_MS); if (timeSinceEnqueuedMillis != null) { telemetryBuilder.addMeasurement("timeSinceEnqueued", (double) timeSinceEnqueuedMillis); } return telemetryBuilder.build(); } private boolean getSuccess(SpanData span) { switch (span.getStatus().getStatusCode()) { case ERROR: return false; case OK: return true; case UNSET: if 
(captureHttpServer4xxAsError) { Long statusCode = getStableOrOldAttribute(span.getAttributes(), SemanticAttributes.HTTP_RESPONSE_STATUS_CODE, SemanticAttributes.HTTP_STATUS_CODE); return statusCode == null || statusCode < 400; } return true; } return true; } @Nullable public static String getHttpUrlFromServerSpan(Attributes attributes) { String httpUrl = getHttpUrlFromServerSpanStableSemconv(attributes); if (httpUrl != null) { return httpUrl; } return getHttpUrlFromServerSpanOldSemconv(attributes); } @Nullable @Nullable private static String getHttpUrlFromServerSpanOldSemconv(Attributes attributes) { String httpUrl = attributes.get(SemanticAttributes.HTTP_URL); if (httpUrl != null) { return httpUrl; } String scheme = attributes.get(SemanticAttributes.HTTP_SCHEME); if (scheme == null) { return null; } String target = attributes.get(SemanticAttributes.HTTP_TARGET); if (target == null) { return null; } String host = attributes.get(SemanticAttributes.NET_HOST_NAME); Long port = attributes.get(SemanticAttributes.NET_HOST_PORT); if (port != null && port > 0) { return scheme + ": } return scheme + ": } @Nullable private static String getSource(Attributes attributes) { String source = attributes.get(AiSemanticAttributes.SPAN_SOURCE); if (source != null) { return source; } return getMessagingTargetSource(attributes); } @Nullable private static String getMessagingTargetSource(Attributes attributes) { if (isAzureSdkMessaging(attributes.get(AiSemanticAttributes.AZURE_SDK_NAMESPACE))) { String peerAddress = attributes.get(AiSemanticAttributes.AZURE_SDK_PEER_ADDRESS); if (peerAddress != null) { String destination = attributes.get(AiSemanticAttributes.AZURE_SDK_MESSAGE_BUS_DESTINATION); return peerAddress + "/" + destination; } } String messagingSystem = getMessagingSystem(attributes); if (messagingSystem == null) { return null; } String source = nullAwareConcat( getTargetOrNullOldSemconv(attributes, Integer.MAX_VALUE), 
attributes.get(SemanticAttributes.MESSAGING_DESTINATION_NAME), "/"); if (source != null) { return source; } return messagingSystem; } private static boolean isAzureSdkMessaging(String messagingSystem) { return "Microsoft.EventHub".equals(messagingSystem) || "Microsoft.ServiceBus".equals(messagingSystem); } private static String getOperationName(SpanData span) { String operationName = span.getAttributes().get(AiSemanticAttributes.OPERATION_NAME); if (operationName != null) { return operationName; } return span.getName(); } private static String nullAwareConcat( @Nullable String str1, @Nullable String str2, String separator) { if (str1 == null) { return str2; } if (str2 == null) { return str1; } return str1 + separator + str2; } private void exportEvents( SpanData span, @Nullable String operationName, long itemCount, Consumer<TelemetryItem> consumer) { for (EventData event : span.getEvents()) { String instrumentationScopeName = span.getInstrumentationScopeInfo().getName(); if (eventSuppressor.test(event, instrumentationScopeName)) { continue; } if (event.getAttributes().get(SemanticAttributes.EXCEPTION_TYPE) != null || event.getAttributes().get(SemanticAttributes.EXCEPTION_MESSAGE) != null) { SpanContext parentSpanContext = span.getParentSpanContext(); if (!parentSpanContext.isValid() || parentSpanContext.isRemote()) { String stacktrace = event.getAttributes().get(SemanticAttributes.EXCEPTION_STACKTRACE); if (stacktrace != null && !shouldSuppress.test(span, event)) { consumer.accept( createExceptionTelemetryItem(stacktrace, span, operationName, itemCount)); } } return; } MessageTelemetryBuilder telemetryBuilder = MessageTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } 
setTime(telemetryBuilder, event.getEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(event.getAttributes(), telemetryBuilder); telemetryBuilder.setMessage(event.getName()); consumer.accept(telemetryBuilder.build()); } } private TelemetryItem createExceptionTelemetryItem( String errorStack, SpanData span, @Nullable String operationName, long itemCount) { ExceptionTelemetryBuilder telemetryBuilder = ExceptionTelemetryBuilder.create(); telemetryInitializer.accept(telemetryBuilder, span.getResource()); setOperationId(telemetryBuilder, span.getTraceId()); setOperationParentId(telemetryBuilder, span.getSpanId()); if (operationName != null) { setOperationName(telemetryBuilder, operationName); } else { setOperationName(telemetryBuilder, span.getAttributes()); } setTime(telemetryBuilder, span.getEndEpochNanos()); setItemCount(telemetryBuilder, itemCount); MAPPINGS.map(span.getAttributes(), telemetryBuilder); telemetryBuilder.setExceptions(Exceptions.minimalParse(errorStack)); return telemetryBuilder.build(); } public static <T> T getStableOrOldAttribute(Attributes attributes, AttributeKey<T> stable, AttributeKey<T> old) { T value = attributes.get(stable); if (value != null) { return value; } return attributes.get(old); } private static void setTime(AbstractTelemetryBuilder telemetryBuilder, long epochNanos) { telemetryBuilder.setTime(FormattedTime.offSetDateTimeFromEpochNanos(epochNanos)); } private static void setItemCount(AbstractTelemetryBuilder telemetryBuilder, long itemCount) { if (itemCount != 1) { telemetryBuilder.setSampleRate(100.0f / itemCount); } } private static long getItemCount(SpanData span) { Long itemCount = span.getAttributes().get(AiSemanticAttributes.ITEM_COUNT); return itemCount == null ? 
1 : itemCount; } private static void addLinks(AbstractTelemetryBuilder telemetryBuilder, List<LinkData> links) { if (links.isEmpty()) { return; } StringBuilder sb = new StringBuilder(); sb.append("["); boolean first = true; for (LinkData link : links) { if (!first) { sb.append(","); } sb.append("{\"operation_Id\":\""); sb.append(link.getSpanContext().getTraceId()); sb.append("\",\"id\":\""); sb.append(link.getSpanContext().getSpanId()); sb.append("\"}"); first = false; } sb.append("]"); telemetryBuilder.addProperty("_MS.links", sb.toString()); } static void applyCommonTags(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( SemanticAttributes.ENDUSER_ID.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_USER_ID.toString(), (String) value); } }) .exact( AiSemanticAttributes.PREVIEW_APPLICATION_VERSION.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag( ContextTagKeys.AI_APPLICATION_VER.toString(), (String) value); } }); applyConnectionStringAndRoleNameOverrides(mappingsBuilder); } @SuppressWarnings("deprecation") private static final WarningLogger connectionStringAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleNameAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. 
Please use" + " \"roleNameOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") private static final WarningLogger roleInstanceAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please reach out to" + " https: + " case for this."); @SuppressWarnings("deprecation") private static final WarningLogger instrumentationKeyAttributeNoLongerSupported = new WarningLogger( SpanDataMapper.class, AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey() + " is no longer supported because it" + " is incompatible with pre-aggregated standard metrics. Please use" + " \"connectionStringOverrides\" configuration, or reach out to" + " https: + " different use case."); @SuppressWarnings("deprecation") static void applyConnectionStringAndRoleNameOverrides(MappingsBuilder mappingsBuilder) { mappingsBuilder .exact( AiSemanticAttributes.INTERNAL_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { telemetryBuilder.setConnectionString(ConnectionString.parse((String) value)); }) .exact( AiSemanticAttributes.INTERNAL_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { if (value instanceof String) { telemetryBuilder.addTag(ContextTagKeys.AI_CLOUD_ROLE.toString(), (String) value); } }) .exact( AiSemanticAttributes.DEPRECATED_CONNECTION_STRING.getKey(), (telemetryBuilder, value) -> { connectionStringAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_NAME.getKey(), (telemetryBuilder, value) -> { roleNameAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_ROLE_INSTANCE.getKey(), (telemetryBuilder, value) -> { roleInstanceAttributeNoLongerSupported.recordWarning(); }) .exact( AiSemanticAttributes.DEPRECATED_INSTRUMENTATION_KEY.getKey(), (telemetryBuilder, value) -> { 
instrumentationKeyAttributeNoLongerSupported.recordWarning(); }); } }
Please use managed disks instead; they are the currently preferred way to create VMs.
/**
 * Verifies that {@code userData} can be supplied at VM creation time and then replaced via
 * {@code update()}, asserting the round-tripped value after each operation.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Base64-encode a random upper-cased UUID as the user data payload.
    // encodeToString is used instead of new String(encode(...)): it avoids the
    // platform-default-charset pitfall of the String(byte[]) constructor.
    String userDataForCreate = Base64.getEncoder().encodeToString(
        UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8));
    String userDataForUpdate = Base64.getEncoder().encodeToString(
        UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8));

    VirtualMachine vm =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .withUserData(userDataForCreate)
            .create();
    Assertions.assertEquals(userDataForCreate, vm.userData());

    // replace the user data and confirm the updated value is reflected on the model
    vm = vm.update().withUserData(userDataForUpdate).apply();
    Assertions.assertEquals(userDataForUpdate, vm.userData());
}
.withUnmanagedDisks()
/**
 * Creates a VM with a fixed user-data payload, then updates the payload, verifying after each
 * step by re-fetching the inner resource with {@code InstanceViewTypes.USER_DATA} (the user data
 * is only returned when explicitly requested via the expand parameter).
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    final String initialUserData = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    final String replacementUserData = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withNewDataDisk(127)
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withUserData(initialUserData)
            .create();

    // fetch the inner model with USER_DATA expansion to read back the payload
    Response<VirtualMachineInner> innerResponse =
        computeManager
            .serviceClient()
            .getVirtualMachines()
            .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(initialUserData, innerResponse.getValue().userData());

    virtualMachine.update().withUserData(replacementUserData).apply();

    innerResponse =
        computeManager
            .serviceClient()
            .getVirtualMachines()
            .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(replacementUserData, innerResponse.getValue().userData());
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Disabled("The `userData` is not returned, so can not use `Assertions.assertEquals` be determine whether the returned `userData` is correctly.") @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) 
.withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = 
computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = 
this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
            // (continuation of canHibernateVirtualMachine: finish the VM definition with
            // hibernation enabled — requires a hibernation-capable size such as Standard_D2s_v3)
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
            .enableHibernation()
            .create();
        Assertions.assertTrue(vm.isHibernationEnabled());

        // Deallocate with hibernate=true, then confirm the instance view reports the
        // "HibernationState/Hibernated" status code.
        vm.deallocate(true);
        InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
            .filter(status -> "HibernationState/Hibernated".equals(status.code()))
            .findFirst().orElse(null);
        Assertions.assertNotNull(hibernationStatus);

        // Hibernation can only be toggled while the VM is deallocated (non-hibernate).
        vm.start();
        vm.deallocate();
        vm.update()
            .disableHibernation()
            .apply();
        Assertions.assertFalse(hibernationStatus == null ? false : !vm.isHibernationEnabled() ? true : false || false); /* placeholder removed */
    }

    /**
     * Exercises the basic VM power lifecycle: redeploy, power off, start, restart and
     * deallocate, asserting the expected {@link PowerState} after each transition.
     */
    @Test
    public void canOperateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.redeploy();

        // powerOff(true) skips graceful shutdown; instance view must be refreshed to
        // observe the new power state.
        vm.powerOff(true);
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.STOPPED, vm.powerState());

        vm.start();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.restart();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.deallocate();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
    }

    /**
     * Creates a VM with an ephemeral OS disk placed on the cache disk
     * (definition continues on the next source line).
     */
    @Test
    public void canCreateVirtualMachineWithEphemeralOSDisk() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
        vmPaged.stream().count());
        Assertions.assertEquals(2, pageCount);
    }

    /**
     * Builds {@code vmCount} creatable VM definitions that share a single resource group and
     * storage account, each with its own new network and public IP address.
     *
     * <p>Returns a {@link CreatablesInfo} carrying the VM creatables plus the creatable keys
     * of the networks and public IPs, so callers can look the intermediate resources up from
     * the batch-create result.</p>
     */
    private CreatablesInfo prepareCreatableVirtualMachines(
        Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
        Creatable<ResourceGroup> resourceGroupCreatable =
            resourceManager.resourceGroups().define(rgName).withRegion(region);
        // One storage account shared by all VMs (needed for the unmanaged disks below).
        Creatable<StorageAccount> storageAccountCreatable = storageManager
            .storageAccounts()
            .define(generateRandomResourceName("stg", 20))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);
        List<String> networkCreatableKeys = new ArrayList<>();
        List<String> publicIpCreatableKeys = new ArrayList<>();
        List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
        for (int i = 0; i < vmCount; i++) {
            Creatable<Network> networkCreatable = networkManager
                .networks()
                .define(String.format("%s-%d", networkNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withAddressSpace("10.0.0.0/28");
            // Record the creatable's key so the created resource can be retrieved later.
            networkCreatableKeys.add(networkCreatable.key());
            Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager
                .publicIpAddresses()
                .define(String.format("%s-%d", publicIpNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable);
            publicIpCreatableKeys.add(publicIPAddressCreatable.key());
            Creatable<VirtualMachine> virtualMachineCreatable = computeManager
                .virtualMachines()
                .define(String.format("%s-%d", vmNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withNewPrimaryNetwork(networkCreatable)
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("tirekicker")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withNewStorageAccount(storageAccountCreatable);
            virtualMachineCreatables.add(virtualMachineCreatable);
        }
        CreatablesInfo creatablesInfo = new CreatablesInfo();
        creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
        creatablesInfo.networkCreatableKeys = networkCreatableKeys;
        creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
        return creatablesInfo;
    }

    // Simple aggregate returned by prepareCreatableVirtualMachines.
    // NOTE(review): declared as a non-static inner class; it does not use the enclosing
    // instance, so it could be made static — confirm no reflective use before changing.
    class CreatablesInfo {
        private List<Creatable<VirtualMachine>> virtualMachineCreatables;
        List<String> networkCreatableKeys;
        List<String> publicIpCreatableKeys;
    }
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
                .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
                .withAdminUsername("Foo12")
                .withAdminPassword(password())
                .withUnmanagedDisks()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .withOSDiskName("javatest")
                .withLicenseType("Windows_Server")
                .beginCreate();
        // Activation response carries the not-yet-provisioned resource.
        VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue();
        Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState());
        LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus();
        // Honor the service-provided Retry-After when present; otherwise poll every 10s.
        long delayInMills =
            acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null
                ? defaultDelayInMillis
                : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis();
        while (!pollStatus.isComplete()) {
            ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
            PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll();
            pollStatus = pollResponse.getStatus();
            delayInMills =
                pollResponse.getRetryAfter() == null
                    ? defaultDelayInMillis
                    : pollResponse.getRetryAfter().toMillis();
        }
        Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus);
        VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult();
        Assertions.assertEquals("Succeeded", virtualMachine.provisioningState());
        // Now delete via the same manual-poll pattern.
        Accepted<Void> acceptedDelete =
            computeManager.virtualMachines()
                .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name());
        pollStatus = acceptedDelete.getActivationResponse().getStatus();
        delayInMills =
            acceptedDelete.getActivationResponse().getRetryAfter() == null
                ? defaultDelayInMillis
                : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
        while (!pollStatus.isComplete()) {
            ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
            PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
            pollStatus = pollResponse.getStatus();
            delayInMills =
                pollResponse.getRetryAfter() == null
                    ? defaultDelayInMillis
                    : (int) pollResponse.getRetryAfter().toMillis();
        }
        // The VM must now be gone: a 404 with a NotFound error code counts as deleted.
        boolean deleted = false;
        try {
            computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException e) {
            if (e.getResponse().getStatusCode() == 404
                && ("NotFound".equals(e.getValue().getCode())
                    || "ResourceNotFound".equals(e.getValue().getCode()))) {
                deleted = true;
            }
        }
        Assertions.assertTrue(deleted);
    }

    // Creates a low-priority (spot) VM with a max price and exercises price/priority updates,
    // including the cases the service is expected to reject while the VM is running.
    @Test
    public void canCreateUpdatePriorityAndPrice() throws Exception {
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
            .withMaxPrice(1000.0)
            .withLicenseType("Windows_Server")
            .create();
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        Assertions.assertEquals("Windows_Server",
            foundVM.licenseType());
        Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
        Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());
        // Changing the max price while the VM is running must be rejected by the service.
        try {
            foundVM.update().withMaxPrice(1500.0).apply();
            Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: max price cannot be updated while the VM is running
        }
        // After deallocation the same update succeeds.
        foundVM.deallocate();
        foundVM.update().withMaxPrice(2000.0).apply();
        foundVM.start();
        Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
        // LOW <-> SPOT priority switches are allowed...
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
        // ...but switching to REGULAR must be rejected.
        try {
            foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
            Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: cannot change a low-priority/spot VM to regular priority
        }
        computeManager.virtualMachines().deleteById(foundVM.id());
    }

    // Verifies that the proximity placement group of a RUNNING VM cannot be changed.
    // Uses two availability sets, each in its own PPG (and the second in rgName2).
    @Test
    public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();
        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
        // Second availability set + PPG in a different region/resource group; the attempted
        // (rejected) VM update will target this one.
        AvailabilitySet setCreated2 =
            computeManager
                .availabilitySets()
                .define(availabilitySetName2)
                .withRegion(regionProxPlacementGroup2)
                .withNewResourceGroup(rgName2)
                .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
                .create();
        Assertions.assertEquals(availabilitySetName2, setCreated2.name());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());
        // VM created inside the first PPG.
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(regionProxPlacementGroup)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
        computeManager.virtualMachines().deleteById(foundVM.id());
        computeManager.availabilitySets().deleteById(setCreated.id());
    }

    // Batch-creates several VMs (plus their networks and public IPs) in one call and verifies
    // every VM and every related resource came back under its expected name.
    @Test
    public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;
        CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
        List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;
        // Single batch create for all VMs and their dependencies.
        CreatedResources<VirtualMachine> createdVirtualMachines =
            computeManager.virtualMachines().create(virtualMachineCreatables);
        Assertions.assertTrue(createdVirtualMachines.size() == count);
        Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
            Assertions.assertNotNull(virtualMachine.id());
        }
        // Related networks are retrievable by their creatable keys.
        Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        for (String networkCreatableKey : networkCreatableKeys) {
            Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
            Assertions.assertNotNull(createdNetwork);
            Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
        }
        // Same for the public IP addresses.
        Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }
        for (String publicIpCreatableKey : publicIpCreatableKeys) {
            PublicIpAddress createdPublicIpAddress =
                (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
            Assertions.assertNotNull(createdPublicIpAddress);
            Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
        }
    }

    // Same parallel-create scenario, but consumed as a reactive stream: each emitted resource
    // is validated as it arrives and counted, then totals are checked against the service.
    @Test
    public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;
        final Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        final Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        final Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }
        final CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        final AtomicInteger resourceCount = new AtomicInteger(0);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        computeManager
            .virtualMachines()
            .createAsync(virtualMachineCreatables)
            .map(
                createdResource -> {
                    if (createdResource instanceof Resource) {
                        Resource resource = (Resource) createdResource;
                        System.out.println("Created: " + resource.id());
                        if (resource instanceof VirtualMachine) {
                            VirtualMachine virtualMachine = (VirtualMachine) resource;
                            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
                            Assertions.assertNotNull(virtualMachine.id());
                        } else if (resource instanceof Network) {
                            Network network = (Network) resource;
                            Assertions.assertTrue(networkNames.contains(network.name()));
                            Assertions.assertNotNull(network.id());
                        } else if (resource instanceof PublicIpAddress) {
                            PublicIpAddress
                                publicIPAddress = (PublicIpAddress) resource;
                            Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
                            Assertions.assertNotNull(publicIPAddress.id());
                        }
                    }
                    resourceCount.incrementAndGet();
                    return createdResource;
                })
            .blockLast();
        // Every expected network and public IP must now exist in the resource group.
        networkNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
        });
        publicIPAddressNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
        });
        // One shared storage account, one NIC per VM, and one stream emission per VM.
        Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(count, resourceCount.get());
    }

    // Verifies that unmanaged data disk VHDs can be placed in an explicit storage account and
    // then re-attached (as existing disks) to a newly created and to an updated VM.
    @Test
    public void canSetStorageAccountForUnmanagedDisk() {
        final String storageName = generateRandomResourceName("st", 14);
        // Premium storage account that will host the data disk VHDs.
        StorageAccount storageAccount =
            storageManager
                .storageAccounts()
                .define(storageName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withSku(StorageAccountSkuType.PREMIUM_LRS)
                .create();
        // VM with two new unmanaged data disks stored at explicit container/blob locations.
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .defineUnmanagedDataDisk("disk1")
                    .withNewVhd(100)
                    .withLun(2)
                    .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                    .attach()
                .defineUnmanagedDataDisk("disk2")
                    .withNewVhd(100)
                    .withLun(3)
                    .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
                    .attach()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .create();
        // Disks are keyed by LUN (2 and 3 as defined above).
        Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
        VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
        Assertions.assertNotNull(firstUnmanagedDataDisk);
        VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
        Assertions.assertNotNull(secondUnmanagedDataDisk);
        String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
        String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
        Assertions.assertNotNull(createdVhdUri1);
        Assertions.assertNotNull(createdVhdUri2);
        // Delete the VM; the VHD blobs remain in the storage account and can be re-attached.
        computeManager.virtualMachines().deleteById(virtualMachine.id());
        virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .create();
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(1, unmanagedDataDisks.size());
        firstUnmanagedDataDisk = null;
        for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
            firstUnmanagedDataDisk = unmanagedDisk;
            break;
        }
        // The re-attached disk must point at the original VHD blob.
        Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
        Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
        // Attach the second pre-existing VHD via update.
        virtualMachine
            .update()
            .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
            .apply();
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
    }

    // Verifies tags can be added individually and replaced wholesale via update().
    @Test
    public void canUpdateTagsOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();
        // Add a single tag.
        virtualMachine.update().withTag("test", "testValue").apply();
        Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));
        // Replace all tags with a new map.
        Map<String, String> testTags = new HashMap<String, String>();
        testTags.put("testTag", "testValue");
        virtualMachine.update().withTags(testTags).apply();
        Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
    }

    // Runs a shell script (git install) on a Linux VM via the run-command API and checks that
    // output status messages come back.
    @Test
    public void canRunScriptOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .create();
        List<String> installGit = new ArrayList<>();
        installGit.add("sudo apt-get update");
        installGit.add("sudo apt-get install -y git");
        RunCommandResult runResult =
            virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>());
        Assertions.assertNotNull(runResult);
        Assertions.assertNotNull(runResult.value());
        Assertions.assertTrue(runResult.value().size() > 0);
    }

    // Simulates eviction of a spot VM and verifies the VM ends up deallocated with its OS disk
    // detached (size/type cleared, disk state RESERVED). Live-only: polls for up to 30 minutes.
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canPerformSimulateEvictionOnSpotVirtualMachine() {
        VirtualMachine virtualMachine =
            computeManager.virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();
        // Before eviction: OS disk attached and sized.
        Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
        Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertNotNull(disk);
        Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());
        // Trigger the eviction and poll (up to ~30 minutes) for the DEALLOCATED power state.
        virtualMachine.simulateEviction();
        boolean deallocated = false;
        int pollIntervalInMinutes = 5;
        for (int i = 0; i < 30; i += pollIntervalInMinutes) {
            ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
            if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
                deallocated = true;
                break;
            }
        }
        Assertions.assertTrue(deallocated);
        // After eviction: OS disk details are cleared on the VM and the disk is RESERVED.
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertEquals(0, virtualMachine.osDiskSize());
        disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
    }

    // Force-deletes a VM and verifies the VM is gone (404) while its NIC survives.
    @Test
    public void canForceDeleteVirtualMachine() {
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .create();
        VirtualMachine
            virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
        String nicId = virtualMachine.primaryNetworkInterfaceId();
        // Force delete (second argument).
        computeManager.virtualMachines().deleteById(virtualMachine.id(), true);
        // VM lookup must now 404.
        try {
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException ex) {
            virtualMachine = null;
            Assertions.assertEquals(404, ex.getResponse().getStatusCode());
        }
        Assertions.assertNull(virtualMachine);
        // The NIC is not deleted by a force delete.
        NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
        Assertions.assertNotNull(nic);
    }

    // Exercises DeleteOptions on create: DELETE options cascade resource deletion with the VM,
    // while the default (DETACH) leaves disks/NICs behind.
    @Test
    public void canCreateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
        Network network =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/24")
                .withSubnet("default", "10.0.0.0/24")
                .create();
        // VM 1: OS disk, data disks and primary NIC all set to DELETE.
        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
        Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());
        // Deleting vm1 should cascade-delete its disks and NIC, leaving only the network and
        // the public IP (2 generic resources) in the group.
        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(2,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        // Remove the leftover public IP so later counts are unaffected.
        PublicIpAddress publicIpAddress =
            computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());
        String secondaryNicName = generateRandomResourceName("nic", 10);
        Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
            this
                .networkManager
                .networkInterfaces()
                .define(secondaryNicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic();
        // VM 2: both NICs (primary and secondary) set to DELETE.
        VirtualMachine vm2 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();
        // Deleting vm2 should leave only the network behind.
        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(1,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        secondaryNetworkInterfaceCreatable =
            this
                .networkManager
                .networkInterfaces()
                .define(secondaryNicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic();
        // VM 3: no delete options specified — everything defaults to DETACH and survives.
        VirtualMachine vm3 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());
        // With DETACH, the 3 disks and 2 NICs remain after the VM is deleted.
        computeManager.virtualMachines().deleteById(vm3.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(2,
            computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
    }

    // Exercises DeleteOptions on update. NOTE: this method continues past the end of this
    // chunk; only its opening statements are visible here.
    @Test
    public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        Network network =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/24")
                .withSubnet("default", "10.0.0.0/24")
                .create();
        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
// Tail of canHibernateVirtualMachine(): create a hibernation-enabled Windows VM,
// hibernate it via deallocate(true), verify the instance-view status, then disable
// hibernation through an update. NOTE(review): requires a VM size/region that
// supports hibernation (STANDARD_D2S_V3 on eastus2euap here) — confirm before porting.
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
.enableHibernation()
.create();
Assertions.assertTrue(vm.isHibernationEnabled());
// deallocate(true) requests hibernation instead of a plain deallocate.
vm.deallocate(true);
// The hibernated state is only observable through the instance view statuses.
InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
    .filter(status -> "HibernationState/Hibernated".equals(status.code()))
    .findFirst().orElse(null);
Assertions.assertNotNull(hibernationStatus);
vm.start();
// Hibernation can only be toggled while the VM is deallocated.
vm.deallocate();
vm.update()
    .disableHibernation()
    .apply();
Assertions.assertFalse(vm.isHibernationEnabled());
}

/**
 * Exercises the VM power-state lifecycle: redeploy, power off (skip shutdown),
 * start, restart, and deallocate, asserting the expected {@link PowerState}
 * after each transition. refreshInstanceView() is required before each check
 * because powerState() is served from the cached instance view.
 */
@Test
public void canOperateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.redeploy();
    // powerOff(true) skips the graceful OS shutdown.
    vm.powerOff(true);
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.STOPPED, vm.powerState());
    vm.start();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.restart();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.deallocate();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
}

// Head of canCreateVirtualMachineWithEphemeralOSDisk(): creates a VM whose OS
// disk is ephemeral (placed on the cache disk); body continues past this chunk.
@Test
public void canCreateVirtualMachineWithEphemeralOSDisk() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
1. Remove the unneeded getter of `userData`. 2. Enable the test case `VirtualMachineOperationsTests#canCreateAndUpdateVirtualMachineWithUserData`. 3. Use a managed disk instead of unmanaged disks.
/**
 * Verifies that {@code userData} can be set at VM creation and changed via update.
 *
 * Fixes over the previous version:
 * 1. {@code userData} is not included in a plain GET response (see the former
 *    {@code @Disabled} note), so it is read back with
 *    {@code InstanceViewTypes.USER_DATA} expand instead of {@code vm.userData()}.
 * 2. Fixed Base64 payloads replace per-run random UUIDs so recorded test
 *    playback stays deterministic.
 * 3. A managed data disk replaces {@code withUnmanagedDisks()}.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Constant Base64 payloads — deterministic across record/playback runs.
    String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";
    VirtualMachine vm = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        // Managed data disk instead of unmanaged disks.
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();
    // userData is only returned when explicitly expanded on the GET call.
    Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForCreate, response.getValue().userData());
    vm.update().withUserData(userDataForUpdate).apply();
    response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForUpdate, response.getValue().userData());
}
.withUnmanagedDisks()
/**
 * Verifies that {@code userData} can be set at VM creation and changed via update.
 * userData is not included in a plain GET response, so it is read back with the
 * {@code InstanceViewTypes.USER_DATA} expand on the service client.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Constant Base64 payloads — deterministic across record/playback runs.
    String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";
    VirtualMachine vm = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        // Managed data disk (no unmanaged-disk storage account needed).
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();
    // Expand USER_DATA explicitly; vm.userData() would be null on a plain GET.
    Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForCreate, response.getValue().userData());
    vm.update().withUserData(userDataForUpdate).apply();
    response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForUpdate, response.getValue().userData());
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Disabled("The `userData` is not returned, so can not use `Assertions.assertEquals` be determine whether the returned `userData` is correctly.") @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) 
.withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = 
computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
        Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

        // Deleting VM #1 should cascade to everything flagged DELETE; only two resources
        // (the network and the public IP, which defaulted to detach) remain in the group.
        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(2,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        PublicIpAddress publicIpAddress =
            computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

        // VM #2: OS disk, primary NIC and an explicitly flagged secondary NIC all set to DELETE.
        String secondaryNicName = generateRandomResourceName("nic", 10);
        Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
            this
                .networkManager
                .networkInterfaces()
                .define(secondaryNicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic();
        VirtualMachine vm2 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // Only the virtual network should survive VM #2's deletion.
        Assertions.assertEquals(1,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        secondaryNetworkInterfaceCreatable =
            // (continuation: rebuild the secondary-NIC creatable for VM #3)
            this
                .networkManager
                .networkInterfaces()
                .define(secondaryNicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic();

        // VM #3: no delete options specified anywhere — everything defaults to DETACH.
        VirtualMachine vm3 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

        // With DETACH defaults, deleting the VM leaves all 3 disks and both NICs behind.
        computeManager.virtualMachines().deleteById(vm3.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(2,
            computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
    }

    /** Verifies DeleteOptions behavior for data disks added via update() after VM creation. */
    @Test
    public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        Network network = this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();

        // VM #1 is created with DELETE as the data-disk default delete option.
        VirtualMachine vm1 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        // Add a second data disk via update: it does NOT inherit the creation-time DELETE
        // default, so after deleting the VM exactly one disk survives.
        vm1.update()
            .withNewDataDisk(10)
            .apply();
        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.disks().deleteById(disk.id());

        // VM #2 is created with no data-disk delete default.
        VirtualMachine vm2 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        // Setting the DELETE default during update applies only to the disk added in that
        // update, so both existing disks remain after the VM is deleted.
        vm2.update()
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .apply();
        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
    }

    /** Verifies hibernation: enable at create, hibernate via deallocate(true), then disable via update. */
    @Test
    public void canHibernateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            // NOTE(review): hard-coded canary region — presumably required for hibernation at recording time; confirm.
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
            .enableHibernation()
            .create();
        Assertions.assertTrue(vm.isHibernationEnabled());

        // deallocate(true) requests hibernation; the instance view should then report the
        // "HibernationState/Hibernated" status.
        vm.deallocate(true);
        InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
            .filter(status -> "HibernationState/Hibernated".equals(status.code()))
            .findFirst().orElse(null);
        Assertions.assertNotNull(hibernationStatus);

        // Hibernation can only be toggled while the VM is deallocated.
        vm.start();
        vm.deallocate();
        vm.update()
            .disableHibernation()
            .apply();
        Assertions.assertFalse(vm.isHibernationEnabled());
    }

    /** Exercises the VM power-state lifecycle: redeploy, power off, start, restart, deallocate. */
    @Test
    public void canOperateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
        vm.redeploy();
        // powerOff(true) skips the graceful shutdown.
        vm.powerOff(true);
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.STOPPED, vm.powerState());
        vm.start();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
        vm.restart();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
        vm.deallocate();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
    }

    /** Verifies an ephemeral (cache-placed) OS disk: caching forced to READ_ONLY, deallocate not supported. */
    @Test
    public void canCreateVirtualMachineWithEphemeralOSDisk() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
            .withEphemeralOSDisk()
            .withPlacement(DiffDiskPlacement.CACHE_DISK)
            .withNewDataDisk(1, 1, CachingTypes.READ_WRITE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .create();
        Assertions.assertNull(vm.osDiskDiskEncryptionSetId());
        Assertions.assertTrue(vm.osDiskSize() > 0);
        // Ephemeral OS disks are always deleted with the VM and always READ_ONLY cached.
        Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE);
        Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY);
        Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks()));
        Assertions.assertTrue(vm.isOSDiskEphemeral());
        Assertions.assertNotNull(vm.osDiskId());
        String osDiskId = vm.osDiskId();

        // Data disks can still be swapped in and out; the OS disk id must survive a power cycle.
        vm.update()
            .withoutDataDisk(1)
            .withNewDataDisk(1, 2, CachingTypes.NONE)
            .withNewDataDisk(1)
            .apply();
        Assertions.assertEquals(vm.dataDisks().size(), 2);
        vm.powerOff();
        vm.start();
        vm.refresh();
        Assertions.assertEquals(osDiskId, vm.osDiskId());
        // Deallocating a VM with an ephemeral OS disk is not supported by the service.
        Assertions.assertThrows(Exception.class, vm::deallocate);
    }

    /** Verifies attaching regular VMs to a flexible-orchestration VMSS (and that uniform VMSS rejects it). */
    @Test
    public void canCreateVirtualMachineWithExistingScaleSet() throws Exception {
        final String vmssName = generateRandomResourceName("vmss", 10);
        Network network = this
            .networkManager
            .networks()
            .define("vmssvnet")
            .withRegion(region.name())
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/28")
            .withSubnet("subnet1", "10.0.0.0/28")
            .create();
        ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName);
        LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);
        VirtualMachineScaleSet flexibleVMSS = this.computeManager
            .virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2)
            .withExistingPrimaryNetworkSubnet(network, "subnet1")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withCapacity(1)
            .withUpgradeMode(UpgradeMode.AUTOMATIC)
            .create();

        // A standalone VM attached to the flexible VMSS raises its capacity from 1 to 2.
        String regularVMName = generateRandomResourceName("vm", 10);
        final String pipDnsLabel = generateRandomResourceName("pip", 10);
        VirtualMachine regularVM = this.computeManager
            .virtualMachines()
            .define(regularVMName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(pipDnsLabel)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("jvuser2")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(flexibleVMSS)
            .create();
        flexibleVMSS.refresh();
        Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId());
        Assertions.assertEquals(2, flexibleVMSS.capacity());
        regularVM.deallocate();
        Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED);

        // Deleting the attached VM drops the VMSS capacity back to 1.
        this.computeManager
            .virtualMachines().deleteById(regularVM.id());
        flexibleVMSS.refresh();
        Assertions.assertEquals(flexibleVMSS.capacity(), 1);

        // A VM with unmanaged disks cannot join a VMSS — the service rejects it.
        final String storageAccountName = generateRandomResourceName("stg", 17);
        Assertions.assertThrows(
            ApiErrorException.class,
            () -> computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.1.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS")
                .withRootUsername("jvuser3")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .withNewStorageAccount(storageAccountName)
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .withExistingVirtualMachineScaleSet(flexibleVMSS)
                .create()
        );
        final String vmssName2 =
            // (continuation: build a uniform-orchestration VMSS to show standalone VMs cannot join it)
            generateRandomResourceName("vmss", 10);
        Network network2 = this
            .networkManager
            .networks()
            .define("vmssvnet2")
            .withRegion(region.name())
            .withExistingResourceGroup(rgName)
            .withAddressSpace("192.168.0.0/28")
            .withSubnet("subnet2", "192.168.0.0/28")
            .create();
        LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);
        VirtualMachineScaleSet uniformVMSS = this.computeManager
            .virtualMachineScaleSets()
            .define(vmssName2)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
            .withExistingPrimaryNetworkSubnet(network2, "subnet2")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser4")
            .withSsh(sshPublicKey())
            .withCapacity(1)
            .create();
        Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null));

        // Attaching a standalone VM to a uniform (non-flexible) VMSS must fail.
        String regularVMName2 = generateRandomResourceName("vm", 10);
        Assertions.assertThrows(
            ApiErrorException.class,
            () -> this.computeManager
                .virtualMachines()
                .define(regularVMName2)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.1.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("jvuser5")
                .withSsh(sshPublicKey())
                .withExistingVirtualMachineScaleSet(uniformVMSS)
                .create()
        );
    }

    /** Verifies swapping a VM's OS disk with another managed disk, including one encrypted with a customer key. */
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canSwapOSDiskWithManagedDisk() {
        String storageAccountName = generateRandomResourceName("sa", 15);
        StorageAccount storageAccount = this.storageManager
            .storageAccounts()
            .define(storageAccountName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .create();
        String vm1Name = generateRandomResourceName("vm", 15);
        VirtualMachine vm1 = this.computeManager
            .virtualMachines()
            .define(vm1Name)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10, 1, CachingTypes.READ_WRITE)
            // DETACH so the original OS disk survives the later swap.
            .withOSDiskDeleteOptions(DeleteOptions.DETACH)
            .withExistingStorageAccount(storageAccount)
            .create();
        Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId());
        // Default encryption is with a platform-managed key.
        Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type());

        // Create a Key Vault + key and a disk encryption set (customer-managed key).
        String vaultName = generateRandomResourceName("vault", 15);
        Vault vault = this.keyVaultManager
            .vaults()
            .define(vaultName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .defineAccessPolicy()
                .forServicePrincipal(clientIdFromFile())
                .allowKeyPermissions(KeyPermissions.CREATE)
                .attach()
            .create();
        String keyName = generateRandomResourceName("key", 15);
        Key key = vault.keys()
            .define(keyName)
            .withKeyTypeToCreate(KeyType.RSA)
            .withKeySize(4096)
            .create();
        String desName = generateRandomResourceName("des", 15);
        DiskEncryptionSet des = this.computeManager.diskEncryptionSets()
            .define(desName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY)
            .withExistingKeyVault(vault.id())
            .withExistingKey(key.id())
            .withSystemAssignedManagedServiceIdentity()
            .create();
        // Grant the encryption set's managed identity access to the key material.
        vault.update()
            .defineAccessPolicy()
                .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId())
                .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY)
                .attach()
            .withPurgeProtectionEnabled()
            .apply();

        // VM #2 exists only to produce a customer-key-encrypted OS disk, then is deleted (disk detaches).
        String vm2Name = generateRandomResourceName("vm", 15);
        VirtualMachine vm2 = this.computeManager.virtualMachines()
            .define(vm2Name)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withOSDiskDiskEncryptionSet(des.id())
            .withOSDiskDeleteOptions(DeleteOptions.DETACH)
            .create();
        String vm2OSDiskId = vm2.osDiskId();
        this.computeManager.virtualMachines().deleteById(vm2.id());
        Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId);
        Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type());

        // Swap vm1's OS disk for vm2's customer-key-encrypted disk (VM must be deallocated).
        vm1.deallocate();
        vm1.update()
            .withOSDisk(vm2OSDiskId)
            .apply();
        vm1.start();
        vm1.refresh();
        Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId);
        Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()));

        // Swap back to the original platform-key disk and verify the encryption set is gone.
        vm1.deallocate();
        vm1.update()
            .withOSDisk(vm1OSDisk)
            .apply();
        vm1.start();
        vm1.refresh();
        Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id());
        Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet());
    }

    /** Verifies Trusted Launch: secure boot + vTPM on at create, toggled off via update. */
    @Test
    public void canCRUDTrustedLaunchVM() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            // Trusted Launch requires a Gen2 image.
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withTrustedLaunch()
            .withSecureBoot()
            .withVTpm()
            .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .create();
        Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
        Assertions.assertTrue(vm.isSecureBootEnabled());
        Assertions.assertTrue(vm.isVTpmEnabled());

        // Disable secure boot and vTPM, then restart so the change takes effect.
        vm.update()
            .withoutSecureBoot()
            .withoutVTpm()
            .applyAsync()
            .flatMap(VirtualMachine::restartAsync)
            .block();
        // Give the service a minute to settle, then re-fetch and verify both flags are off.
        ResourceManagerUtils.sleep(Duration.ofMinutes(1));
        vm = computeManager.virtualMachines().getById(vm.id());
        Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
        Assertions.assertFalse(vm.isSecureBootEnabled());
        Assertions.assertFalse(vm.isVTpmEnabled());
        computeManager.virtualMachines().deleteById(vm.id());
    }

    /** Verifies that DeleteOptions for OS disk, data disks and NICs can be changed via update(). */
    @Test
    public void canUpdateDeleteOptions() {
        String networkName = generateRandomResourceName("network", 15);
        String nicName = generateRandomResourceName("nic", 15);
        String nicName2 = generateRandomResourceName("nic", 15);
        Network network = this
            .networkManager
            .networks()
            .define(networkName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.1.0/24")
            .withSubnet("subnet1", "10.0.1.0/28")
            .withSubnet("subnet2", "10.0.1.16/28")
            .create();

        // Create a VM with everything (OS disk, data disk, primary + secondary NIC) flagged DELETE.
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet1")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE))
            .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2)
            .withNewSecondaryNetworkInterface(this
                .networkManager
                .networkInterfaces()
                .define(nicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("subnet1")
                .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .create();
        Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
            DeleteOptions.DELETE.equals(disk.deleteOptions())));

        // Flip OS disk, primary NIC and the LUN-1 data disk to DETACH; the secondary NIC keeps DELETE.
        vm.update()
            .withOsDiskDeleteOptions(DeleteOptions.DETACH)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH)
            .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1)
            .apply();
        Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.networkInterfaceIds().stream()
            .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId ->
                DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
            DeleteOptions.DETACH.equals(disk.deleteOptions())));

        // Attach a second data disk and another secondary NIC while the VM is deallocated.
        NetworkInterface secondaryNic2 = this
            .networkManager
            .networkInterfaces()
            .define(nicName2)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet2")
            .withPrimaryPrivateIPAddressDynamic()
            .create();
        vm.powerOff();
        vm.deallocate();
        vm.update()
            .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH))
            .withExistingSecondaryNetworkInterface(secondaryNic2)
            .apply();

        // Bulk-flip everything back to DELETE: all data disks and all non-primary NICs at once.
        vm.update()
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withDataDisksDeleteOptions(DeleteOptions.DELETE,
                new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0]))
            .withNetworkInterfacesDeleteOptions(
                DeleteOptions.DELETE,
                vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new))
            .apply();
        Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId ->
            DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
            DeleteOptions.DELETE.equals(disk.deleteOptions())));
    }

    /** Verifies listByVirtualMachineScaleSetId only returns VMs attached to the given flexible VMSS. */
    @Test
    public void testListVmByVmssId() {
        String vmssName =
            generateRandomResourceName("vmss", 15);
        String vmName = generateRandomResourceName("vm", 15);
        String vmName2 = generateRandomResourceName("vm", 15);
        VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .create();
        // Empty VMSS lists no VMs.
        Assertions.assertEquals(0,
            computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());

        // vm joins the VMSS; vm2 is standalone.
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(vmss)
            .create();
        Assertions.assertNotNull(vm.virtualMachineScaleSetId());
        VirtualMachine vm2 = computeManager.virtualMachines()
            .define(vmName2)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.16/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .create();
        Assertions.assertNull(vm2.virtualMachineScaleSetId());

        // Listing by VMSS id returns only vm; listing by resource group returns both.
        Assertions.assertEquals(1,
            computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
        Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id()));
        Assertions.assertEquals(2,
            computeManager.virtualMachines().listByResourceGroup(rgName).stream().count());
    }

    /** Verifies paging (nextLink encoding) of listByVirtualMachineScaleSetId with a 70-instance VMSS. */
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. Backend pageSize may change, so we don't want to assert that.")
    public void testListByVmssIdNextLink() throws Exception {
        String vmssName = generateRandomResourceName("vmss", 15);
        String vnetName = generateRandomResourceName("vnet", 15);
        String vmName = generateRandomResourceName("vm", 15);
        int vmssCapacity = 70;

        // A standalone VM that must NOT show up in the by-VMSS listing.
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .create();
        Network network = networkManager.networks().define(vnetName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("subnet1", "10.0.0.0/24")
            .create();
        LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region,
            this.resourceManager.resourceGroups().getByName(rgName), "1",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);
        VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
            .withExistingPrimaryNetworkSubnet(network, "subnet1")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withCapacity(vmssCapacity)
            .create();

        // Walk the paged listing: every page returns 200 and the total equals the VMSS capacity.
        PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id());
        Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage();
        int pageCount = 0;
        for (PagedResponse<VirtualMachine> response : vmIterable) {
            pageCount++;
            Assertions.assertEquals(200, response.getStatusCode());
        }
        Assertions.assertEquals(vmssCapacity,
            // (continuation: total listed VMs equals the VMSS capacity; page count assumes the backend page size)
            vmPaged.stream().count());
        Assertions.assertEquals(2, pageCount);
    }

    /**
     * Builds {@code vmCount} VM creatables (each with its own network and public IP, all
     * sharing one resource group and storage account) and records the creatable keys so
     * tests can look the dependent resources up from the batch-create result.
     */
    private CreatablesInfo prepareCreatableVirtualMachines(
        Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
        Creatable<ResourceGroup> resourceGroupCreatable =
            resourceManager.resourceGroups().define(rgName).withRegion(region);
        Creatable<StorageAccount> storageAccountCreatable = storageManager
            .storageAccounts()
            .define(generateRandomResourceName("stg", 20))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);
        List<String> networkCreatableKeys = new ArrayList<>();
        List<String> publicIpCreatableKeys = new ArrayList<>();
        List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
        for (int i = 0; i < vmCount; i++) {
            Creatable<Network> networkCreatable = networkManager
                .networks()
                .define(String.format("%s-%d", networkNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withAddressSpace("10.0.0.0/28");
            // Keys let the caller retrieve the created resource from the batch result.
            networkCreatableKeys.add(networkCreatable.key());
            Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager
                .publicIpAddresses()
                .define(String.format("%s-%d", publicIpNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable);
            publicIpCreatableKeys.add(publicIPAddressCreatable.key());
            Creatable<VirtualMachine> virtualMachineCreatable = computeManager
                .virtualMachines()
                .define(String.format("%s-%d", vmNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withNewPrimaryNetwork(networkCreatable)
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("tirekicker")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withNewStorageAccount(storageAccountCreatable);
            virtualMachineCreatables.add(virtualMachineCreatable);
        }
        CreatablesInfo creatablesInfo = new CreatablesInfo();
        creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
        creatablesInfo.networkCreatableKeys = networkCreatableKeys;
        creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
        return creatablesInfo;
    }

    // Simple holder returned by prepareCreatableVirtualMachines: the VM creatables plus the
    // creatable keys of their dependent networks and public IPs.
    // NOTE(review): a non-static inner class captures the enclosing test instance; a static
    // nested class would avoid that — confirm nothing relies on the outer reference.
    class CreatablesInfo {
        private List<Creatable<VirtualMachine>> virtualMachineCreatables;
        List<String> networkCreatableKeys;
        List<String> publicIpCreatableKeys;
    }
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
                .withAdminUsername("Foo12")
                .withAdminPassword(password())
                .withUnmanagedDisks()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .withOSDiskName("javatest")
                .withLicenseType("Windows_Server")
                .beginCreate();
        // The activation response carries the not-yet-provisioned resource.
        VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue();
        Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState());
        LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus();
        long delayInMills =
            acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null
                ? defaultDelayInMillis
                : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis();
        // Poll manually until the create LRO completes, honoring Retry-After when present.
        while (!pollStatus.isComplete()) {
            ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
            PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll();
            pollStatus = pollResponse.getStatus();
            delayInMills =
                pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis();
        }
        Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus);
        VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult();
        Assertions.assertEquals("Succeeded", virtualMachine.provisioningState());

        // Now delete through the sync poller as well.
        Accepted<Void> acceptedDelete =
            computeManager.virtualMachines()
                .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name());
        pollStatus = acceptedDelete.getActivationResponse().getStatus();
        // NOTE(review): the (int) casts below narrow Duration.toMillis() (a long); safe only
        // while Retry-After stays under ~24 days, but inconsistent with the create loop above.
        delayInMills =
            acceptedDelete.getActivationResponse().getRetryAfter() == null
                ? defaultDelayInMillis
                : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
        while (!pollStatus.isComplete()) {
            ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
            PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
            pollStatus = pollResponse.getStatus();
            delayInMills =
                pollResponse.getRetryAfter() == null
                    ? defaultDelayInMillis
                    : (int) pollResponse.getRetryAfter().toMillis();
        }
        // The VM must be gone: a 404 with NotFound/ResourceNotFound confirms deletion.
        boolean deleted = false;
        try {
            computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException e) {
            if (e.getResponse().getStatusCode() == 404
                && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) {
                deleted = true;
            }
        }
        Assertions.assertTrue(deleted);
    }

    /** Verifies low-priority/spot billing: max-price and priority updates and their restrictions. */
    @Test
    public void canCreateUpdatePriorityAndPrice() throws Exception {
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
            .withMaxPrice(1000.0)
            .withLicenseType("Windows_Server")
            .create();
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());
        Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
        Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());

        try {
            // changing max price on a running VM must be rejected by the service
            foundVM.update().withMaxPrice(1500.0).apply();
            Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: price update rejected while the VM is running
        }
        // max price CAN change while deallocated
        foundVM.deallocate();
        foundVM.update().withMaxPrice(2000.0).apply();
        foundVM.start();
        Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
        try {
            // switching back to REGULAR is not allowed
            foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
            Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: priority cannot revert to REGULAR
        }
        computeManager.virtualMachines().deleteById(foundVM.id());
    }

    /** A running VM's proximity placement group must not be updatable. */
    @Test
    public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();
        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());

        // A second PPG in a different region/resource group, used as the (illegal) update target.
        AvailabilitySet setCreated2 =
            computeManager
                .availabilitySets()
                .define(availabilitySetName2)
                .withRegion(regionProxPlacementGroup2)
                .withNewResourceGroup(rgName2)
                .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
                .create();
        Assertions.assertEquals(availabilitySetName2, setCreated2.name());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());

        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(regionProxPlacementGroup)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id());
        computeManager.availabilitySets().deleteById(setCreated.id());
    }

    /** Batch-creates 5 VMs plus their networks/public IPs and verifies all related resources. */
    @Test
    public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;

        CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
        List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;

        CreatedResources<VirtualMachine> createdVirtualMachines =
            computeManager.virtualMachines().create(virtualMachineCreatables);
        Assertions.assertTrue(createdVirtualMachines.size() == count);

        Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
            Assertions.assertNotNull(virtualMachine.id());
        }

        // Related networks are retrieved through the creatable keys captured earlier.
        Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        for (String networkCreatableKey : networkCreatableKeys) {
            Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
            Assertions.assertNotNull(createdNetwork);
            Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
        }

        Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }
        for (String publicIpCreatableKey : publicIpCreatableKeys) {
            PublicIpAddress createdPublicIpAddress =
                (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
            Assertions.assertNotNull(createdPublicIpAddress);
            Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
        }
    }

    /** Same batch create, but observed through the reactive createAsync stream. */
    @Test
    public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;

        final Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        final Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        final Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }

        final CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        final AtomicInteger resourceCount = new AtomicInteger(0);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        // Each emitted resource is checked against the expected name set for its type.
        computeManager
            .virtualMachines()
            .createAsync(virtualMachineCreatables)
            .map(
                createdResource -> {
                    if (createdResource instanceof Resource) {
                        Resource resource = (Resource) createdResource;
                        System.out.println("Created: " + resource.id());
                        if (resource instanceof VirtualMachine) {
                            VirtualMachine virtualMachine = (VirtualMachine) resource;
                            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
                            Assertions.assertNotNull(virtualMachine.id());
                        } else if (resource instanceof Network) {
                            Network network = (Network) resource;
                            Assertions.assertTrue(networkNames.contains(network.name()));
                            Assertions.assertNotNull(network.id());
                        } else if (resource instanceof PublicIpAddress) {
                            PublicIpAddress publicIPAddress = (PublicIpAddress) resource;
                            Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
                            Assertions.assertNotNull(publicIPAddress.id());
                        }
                    }
                    resourceCount.incrementAndGet();
                    return createdResource;
                })
            .blockLast();

        networkNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
        });
        publicIPAddressNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
        });
        Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
        // NOTE(review): resourceCount counts only VM emissions from createAsync here — confirm
        // the stream emits one item per top-level creatable rather than per related resource.
        Assertions.assertEquals(count, resourceCount.get());
    }

    /** Unmanaged (VHD-based) data disks stored in an explicitly chosen storage account. */
    @Test
    public void canSetStorageAccountForUnmanagedDisk() {
        final String storageName = generateRandomResourceName("st", 14);
        StorageAccount storageAccount =
            storageManager
                .storageAccounts()
                .define(storageName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withSku(StorageAccountSkuType.PREMIUM_LRS)
                .create();

        // Two new unmanaged data disks (LUNs 2 and 3) stored in the account created above.
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .defineUnmanagedDataDisk("disk1")
                .withNewVhd(100)
                .withLun(2)
                .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                .attach()
                .defineUnmanagedDataDisk("disk2")
                .withNewVhd(100)
                .withLun(3)
                .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
                .attach()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .create();
        Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
        // Disks are keyed by LUN (2 and 3, as defined above).
        VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
        Assertions.assertNotNull(firstUnmanagedDataDisk);
        VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
        Assertions.assertNotNull(secondUnmanagedDataDisk);
        String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
        String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
        Assertions.assertNotNull(createdVhdUri1);
        Assertions.assertNotNull(createdVhdUri2);

        // Deleting the VM leaves the VHDs behind; re-attach the first one to a new VM.
        computeManager.virtualMachines().deleteById(virtualMachine.id());
        virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .create();
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(1, unmanagedDataDisks.size());
        firstUnmanagedDataDisk = null;
        for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
            firstUnmanagedDataDisk = unmanagedDisk;
            break;
        }
        Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
        Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
        // Attach the second existing VHD via update and confirm both disks are present.
        virtualMachine
            .update()
            .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
            .apply();
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
    }

    /** Tag updates via withTag (single) and withTags (replace-all). */
    @Test
    public void canUpdateTagsOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();

        virtualMachine.update().withTag("test", "testValue").apply();
        Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));

        Map<String, String> testTags = new HashMap<String, String>();
        testTags.put("testTag", "testValue");
        virtualMachine.update().withTags(testTags).apply();
        Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
    }

    /** Executes a shell script (apt-get install git) on the VM through RunCommand. */
    @Test
    public void canRunScriptOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .create();

        List<String> installGit = new ArrayList<>();
        installGit.add("sudo apt-get update");
        installGit.add("sudo apt-get install -y git");
        RunCommandResult runResult =
            virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>());
        Assertions.assertNotNull(runResult);
        Assertions.assertNotNull(runResult.value());
        Assertions.assertTrue(runResult.value().size() > 0);
    }

    /**
     * Simulates eviction of a spot VM and waits (live only — multi-minute sleeps) for the VM
     * to reach DEALLOCATED with its OS disk left in RESERVED state.
     */
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canPerformSimulateEvictionOnSpotVirtualMachine() {
        VirtualMachine virtualMachine =
            computeManager.virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();

        Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
        Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertNotNull(disk);
        Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());

        // Trigger eviction, then poll (up to 30 minutes, 5-minute intervals) for deallocation.
        virtualMachine.simulateEviction();
        boolean deallocated = false;
        int pollIntervalInMinutes = 5;
        for (int i = 0; i < 30; i += pollIntervalInMinutes) {
            ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
            if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
                deallocated = true;
                break;
            }
        }
        Assertions.assertTrue(deallocated);

        // After eviction the VM model reports no OS disk storage/size and the disk is RESERVED.
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertEquals(0, virtualMachine.osDiskSize());
        disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
    }

    /** Force-deletes a VM and verifies the NIC survives (force delete detaches, not cascades). */
    @Test
    public void canForceDeleteVirtualMachine() {
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .create();
        VirtualMachine
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
        String nicId = virtualMachine.primaryNetworkInterfaceId();

        // forceDeletion = true
        computeManager.virtualMachines().deleteById(virtualMachine.id(), true);
        try {
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException ex) {
            virtualMachine = null;
            Assertions.assertEquals(404, ex.getResponse().getStatusCode());
        }
        Assertions.assertNull(virtualMachine);
        // The NIC must still exist after the force delete.
        NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
        Assertions.assertNotNull(nic);
    }

    /**
     * Exercises DeleteOptions on OS disk, data disks, NICs and public IP: DELETE options
     * cascade on VM delete; the default (DETACH) leaves resources behind.
     */
    @Test
    public void canCreateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
        Network network =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/24")
                .withSubnet("default", "10.0.0.0/24")
                .create();

        // vm1: everything marked DELETE except the public IP (see commented-out option).
        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
        Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // Only the vnet and the public IP should remain after the cascading delete.
        Assertions.assertEquals(2,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        PublicIpAddress publicIpAddress =
            computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

        // vm2: secondary NIC also marked DELETE; only the vnet should survive.
        String secondaryNicName = generateRandomResourceName("nic", 10);
        Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
            this
                .networkManager
                .networkInterfaces()
                .define(secondaryNicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic();
        VirtualMachine vm2 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();
        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(1,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

        // vm3: no DeleteOptions specified — everything defaults to DETACH and survives delete.
        secondaryNetworkInterfaceCreatable =
            this
                .networkManager
                .networkInterfaces()
                .define(secondaryNicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic();
        VirtualMachine vm3 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

        computeManager.virtualMachines().deleteById(vm3.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // OS disk + 2 data disks detached; both NICs detached.
        Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(2,
            computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
    }

    /** Verifies DeleteOptions behavior when resources are attached via update(). */
    @Test
    public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        Network network =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/24")
                .withSubnet("default", "10.0.0.0/24")
                .create();

        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
randomUUID cannot be created this way. The random data need to be recorded in the test record. Use `ResourceManagerUtil.randomUuid`.
public void canCreateAndUpdateVirtualMachineWithUserData() { String userDataForCreate = new String(Base64.getEncoder().encode( UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8))); String userDataForUpdate = new String(Base64.getEncoder().encode( UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8))); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withNewDataDisk(127) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withUserData(userDataForCreate) .create(); vm.update().withUserData(userDataForUpdate).apply(); }
UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8)));
public void canCreateAndUpdateVirtualMachineWithUserData() { String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4"; String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE"; VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2) .withAdminUsername("Foo12") .withAdminPassword(password()) .withNewDataDisk(127) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withUserData(userDataForCreate) .create(); Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines() .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE); Assertions.assertEquals(userDataForCreate, response.getValue().userData()); vm.update().withUserData(userDataForUpdate).apply(); response = computeManager.serviceClient().getVirtualMachines() .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE); Assertions.assertEquals(userDataForUpdate, response.getValue().userData()); }
// ---------------------------------------------------------------------------
// Live/recorded test suite exercising VirtualMachine CRUD through ComputeManager.
// The fields below hold per-run resource names; initializeClients() randomizes
// the resource-group names and cleanUpResources() deletes them afterwards.
// NOTE(review): "@Test @Test" below repeats a non-repeatable JUnit annotation --
// this does not compile; exactly one @Test should remain (extraction artifact?).
// NOTE(review): cleanUpResources() deletes rgName only; rgName2 (created in
// initializeClients and used by the proximity-placement-group test) is never
// deleted -- confirm whether that leak is intentional.
// ---------------------------------------------------------------------------
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress()
// (continuation) the Linux VM creation completes here; the test then walks
// NIC -> primary IP config -> subnet -> NSG and asserts the "nsg" rule created
// above is wired to both the subnet and the primary IP configuration.
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region());
// (continuation of canCreateVirtualMachine) -- license, power state and
// instance-view assertions, then the VM is deleted.
// NOTE(review): assertNotNull(instanceView.statuses().size() > 0) checks a boxed
// boolean for null, which always passes; assertTrue was almost certainly meant.
// NOTE(review): the literal "https: for mySqlInstallScript below is a truncated
// URL -- everything from "//" onward was lost (likely stripped as a comment by a
// tooling pass); restore the full raw script URL before running this test.
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress()
// (continuation of canCreateVirtualMachineSyncPoll) -- polls the create LRO to
// completion (respecting Retry-After when present), asserts success, then
// begins a delete LRO and polls it the same way.
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ?
// (continuation) -- delete polling loop; afterwards the VM is confirmed gone by
// expecting a 404 with code NotFound/ResourceNotFound from getById.
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server",
// (continuation of canCreateUpdatePriorityAndPrice) -- billing-profile and
// eviction-policy assertions; maxPrice/priority changes are attempted while the
// VM is running (expected to fail) and again after deallocation (expected to work).
// NOTE(review): the empty catch (ManagementException e) { } blocks below appear
// to swallow the expected failures, but they also hide any unexpected error --
// consider asserting on the error code inside each catch.
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue(
// (continuation) -- a second availability set + proximity placement group in a
// second region/resource group (rgName2), then a VM is created inside the first
// placement group.
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } }
// (continuation) -- asserts the VM landed inside the first placement group, then
// attempts to move the running VM to the second group and expects the service to
// reject it with the message asserted below.
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running."
// (continuation) -- tail of the expected error message for the rejected PPG
// move, cleanup, then the next test: availability set and VM sharing one
// proximity placement group.
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if
// (continuation) -- verifies PPG membership of both the availability set and
// the VM.
// NOTE(review): withoutProximityPlacementGroup() is applied below yet the test
// still asserts the PPG is present on the updated VM -- confirm this reflects
// intended service behavior (removal not honored in-place) and not a stale
// assertion.
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));
// (continuation) -- cleanup of the PPG test, then parallel batch creation:
// N VMs plus their networks and public IPs are created in one call and every
// created resource is verified by name.
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress =
// (continuation) -- streaming variant: resources are verified as they are
// emitted from createAsync(), with an AtomicInteger counting every emission.
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress
// (continuation) -- final stream assertions (per-RG resource counts), then
// canSetStorageAccountForUnmanagedDisk: a premium storage account hosts two
// explicitly-placed unmanaged data-disk VHDs (LUNs 2 and 3).
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
// (continuation) -- captures the created VHD URIs, deletes the VM, then
// re-creates it attaching the existing VHDs and verifies the same blob URIs
// are reused (one at create time, the second via update()).
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
// (continuation) -- closes the unmanaged-disk test, then: tag update
// round-trips, a shell-script run on a Linux VM, and spot-VM eviction
// simulation (live-only, skipped in playback).
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName)
// (continuation) -- after simulateEviction() the test polls (up to ~30 min)
// for DEALLOCATED power state and expects the OS disk to move to the RESERVED
// disk state; then canForceDeleteVirtualMachine deletes a VM with
// forceDeletion=true and expects the NIC to survive.
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

// Deleting vm1 should cascade to disks and NIC; only the network and the public IP
// (2 generic resources) remain in the resource group.
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

// Remove the leftover public IP before the next scenario.
PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

// vm2: secondary NIC added with an explicit DeleteOptions.DELETE.
String secondaryNicName = generateRandomResourceName("nic", 10);
Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this.networkManager.networkInterfaces()
    .define(secondaryNicName)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic();
VirtualMachine vm2 = computeManager.virtualMachines()
    .define(vmName)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("testuser")
    .withSsh(sshPublicKey())
    .withOSDiskDeleteOptions(DeleteOptions.DELETE)
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
    .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
    .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
    .create();

// After deleting vm2 only the network itself should remain.
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

secondaryNetworkInterfaceCreatable =
this.networkManager.networkInterfaces()
    .define(secondaryNicName)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic();

// vm3: no delete options specified anywhere, so everything defaults to DETACH.
VirtualMachine vm3 = computeManager.virtualMachines()
    .define(vmName)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("testuser")
    .withSsh(sshPublicKey())
    .withNewDataDisk(10)
    .withNewDataDisk(computeManager.disks()
        .define("datadisk2")
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withData()
        .withSizeInGB(10))
    .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
    .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
    .create();
Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

// With DETACH, deleting vm3 leaves all 3 disks and both NICs behind.
computeManager.virtualMachines().deleteById(vm3.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
}

/**
 * Verifies the DeleteOptions behavior of disks added via VM *update*: a disk added on
 * update does NOT inherit the create-time default delete options unless re-specified.
 */
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    // NOTE: local 'region' intentionally shadows the class field for this test.
    Region region = Region.US_WEST3;
    Network network = this.networkManager.networks()
        .define("network1")
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("default", "10.0.0.0/24")
        .create();

    // vm1 created with DELETE defaults for its data disk, OS disk and primary NIC.
    VirtualMachine vm1 = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("testuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10)
.withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
.withOSDiskDeleteOptions(DeleteOptions.DELETE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
.create();

// Data disk added during update has no delete option set -> defaults to DETACH,
// so exactly 1 disk survives deleting vm1.
vm1.update()
    .withNewDataDisk(10)
    .apply();
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.disks().deleteById(disk.id());

// vm2 created WITHOUT a data-disk default; the default is then supplied on update.
VirtualMachine vm2 = computeManager.virtualMachines()
    .define(vmName)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("testuser")
    .withSsh(sshPublicKey())
    .withNewDataDisk(10)
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
    .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
    .create();
vm2.update()
    .withNewDataDisk(10)
    .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
    .apply();

// The update-time default only affects the disk added in the same update, so the
// create-time disk (DETACH) plus... both disks remain -> 2 disks expected.
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}

/**
 * Verifies hibernation support: enableHibernation at create, hibernate via
 * deallocate(true), then disable hibernation on a deallocated VM.
 */
@Test
public void canHibernateVirtualMachine() {
    // Hibernation requires a capable region/size ("eastus2euap", Standard_D2s_v3).
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
.withAdminUsername("Foo12")
.withAdminPassword(password())
.withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
.enableHibernation()
.create();
Assertions.assertTrue(vm.isHibernationEnabled());

// deallocate(true) requests hibernation; the instance view should then report
// the "HibernationState/Hibernated" status.
vm.deallocate(true);
InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
    .filter(status -> "HibernationState/Hibernated".equals(status.code()))
    .findFirst().orElse(null);
Assertions.assertNotNull(hibernationStatus);

// Hibernation can only be disabled while the VM is deallocated.
vm.start();
vm.deallocate();
vm.update()
    .disableHibernation()
    .apply();
Assertions.assertFalse(vm.isHibernationEnabled());
}

/**
 * Exercises the VM power lifecycle: redeploy, powerOff, start, restart, deallocate,
 * asserting the expected power state after each transition.
 */
@Test
public void canOperateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.redeploy();
    // powerOff(true) skips graceful shutdown.
    vm.powerOff(true);
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.STOPPED, vm.powerState());
    vm.start();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.restart();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.deallocate();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
}

/**
 * Verifies ephemeral OS disk behavior: cache placement, implied READ_ONLY caching,
 * data-disk updates, and that deallocate is rejected for ephemeral-OS-disk VMs.
 */
@Test
public void canCreateVirtualMachineWithEphemeralOSDisk() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12")
.withSsh(sshPublicKey())
.withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
// Ephemeral OS disk placed on the VM cache disk.
.withEphemeralOSDisk()
.withPlacement(DiffDiskPlacement.CACHE_DISK)
.withNewDataDisk(1, 1, CachingTypes.READ_WRITE)
.withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
.create();

// Ephemeral OS disks are always DELETE-on-VM-delete and READ_ONLY cached.
Assertions.assertNull(vm.osDiskDiskEncryptionSetId());
Assertions.assertTrue(vm.osDiskSize() > 0);
Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE);
Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY);
Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks()));
Assertions.assertTrue(vm.isOSDiskEphemeral());
Assertions.assertNotNull(vm.osDiskId());
String osDiskId = vm.osDiskId();

// Replace data disk at LUN 1 and add another; OS disk identity must not change.
vm.update()
    .withoutDataDisk(1)
    .withNewDataDisk(1, 2, CachingTypes.NONE)
    .withNewDataDisk(1)
    .apply();
Assertions.assertEquals(vm.dataDisks().size(), 2);
vm.powerOff();
vm.start();
vm.refresh();
Assertions.assertEquals(osDiskId, vm.osDiskId());
// Deallocate is not supported for ephemeral OS disk VMs.
Assertions.assertThrows(Exception.class, vm::deallocate);
}

/**
 * Verifies attaching/detaching a regular VM to an existing flexible-orchestration
 * scale set, and that unmanaged-disk VMs / uniform scale sets are rejected.
 */
@Test
public void canCreateVirtualMachineWithExistingScaleSet() throws Exception {
    final String vmssName = generateRandomResourceName("vmss", 10);
    Network network = this.networkManager.networks()
        .define("vmssvnet")
        .withRegion(region.name())
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/28")
        .withSubnet("subnet1", "10.0.0.0/28")
        .create();
    ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName);
    LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1",
        LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

    // Flexible-orchestration scale set with capacity 1.
    VirtualMachineScaleSet flexibleVMSS = this.computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2)
        .withExistingPrimaryNetworkSubnet(network, "subnet1")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
.withoutPrimaryInternalLoadBalancer()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("jvuser")
.withSsh(sshPublicKey())
.withCapacity(1)
.withUpgradeMode(UpgradeMode.AUTOMATIC)
.create();

// A regular VM joined to the flexible scale set raises its capacity to 2.
String regularVMName = generateRandomResourceName("vm", 10);
final String pipDnsLabel = generateRandomResourceName("pip", 10);
VirtualMachine regularVM = this.computeManager.virtualMachines()
    .define(regularVMName)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withNewPrimaryNetwork("10.0.1.0/28")
    .withPrimaryPrivateIPAddressDynamic()
    .withNewPrimaryPublicIPAddress(pipDnsLabel)
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("jvuser2")
    .withSsh(sshPublicKey())
    .withExistingVirtualMachineScaleSet(flexibleVMSS)
    .create();
flexibleVMSS.refresh();
Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId());
Assertions.assertEquals(2, flexibleVMSS.capacity());

// The member VM can still be deallocated/deleted individually; deleting it brings
// the scale-set capacity back to 1.
regularVM.deallocate();
Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED);
this.computeManager.virtualMachines().deleteById(regularVM.id());
flexibleVMSS.refresh();
Assertions.assertEquals(flexibleVMSS.capacity(), 1);

// A VM with unmanaged disks cannot join a flexible scale set.
final String storageAccountName = generateRandomResourceName("stg", 17);
Assertions.assertThrows(
    ApiErrorException.class,
    () -> computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS")
        .withRootUsername("jvuser3")
        .withSsh(sshPublicKey())
        .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withNewStorageAccount(storageAccountName)
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withExistingVirtualMachineScaleSet(flexibleVMSS)
        .create()
);

final String vmssName2 =
generateRandomResourceName("vmss", 10);
Network network2 = this.networkManager.networks()
    .define("vmssvnet2")
    .withRegion(region.name())
    .withExistingResourceGroup(rgName)
    .withAddressSpace("192.168.0.0/28")
    .withSubnet("subnet2", "192.168.0.0/28")
    .create();
LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2",
    LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

// Uniform-orchestration scale set (no withFlexibleOrchestrationMode()).
VirtualMachineScaleSet uniformVMSS = this.computeManager.virtualMachineScaleSets()
    .define(vmssName2)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
    .withExistingPrimaryNetworkSubnet(network2, "subnet2")
    .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2)
    .withoutPrimaryInternalLoadBalancer()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
    .withRootUsername("jvuser4")
    .withSsh(sshPublicKey())
    .withCapacity(1)
    .create();
Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null));

// A standalone VM cannot be attached to a uniform scale set.
String regularVMName2 = generateRandomResourceName("vm", 10);
Assertions.assertThrows(
    ApiErrorException.class,
    () -> this.computeManager.virtualMachines()
        .define(regularVMName2)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("jvuser5")
        .withSsh(sshPublicKey())
        .withExistingVirtualMachineScaleSet(uniformVMSS)
        .create()
);
}

/**
 * Verifies swapping the OS disk of a deallocated VM with another managed disk,
 * including one encrypted with a customer-managed key via a disk encryption set.
 * Not recorded: involves Key Vault material.
 */
@Test
@DoNotRecord(skipInPlayback = true)
public void canSwapOSDiskWithManagedDisk() {
    String storageAccountName = generateRandomResourceName("sa", 15);
    StorageAccount storageAccount = this.storageManager.storageAccounts()
        .define(storageAccountName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .create();

    // vm1: OS disk kept on delete (DETACH) so it can be re-attached later.
    String vm1Name = generateRandomResourceName("vm", 15);
    VirtualMachine vm1 = this.computeManager
.virtualMachines()
.define(vm1Name)
.withRegion(region)
.withNewResourceGroup(rgName)
.withNewPrimaryNetwork("10.0.0.0/24")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("jvuser")
.withSsh(sshPublicKey())
.withNewDataDisk(10, 1, CachingTypes.READ_WRITE)
.withOSDiskDeleteOptions(DeleteOptions.DETACH)
.withExistingStorageAccount(storageAccount)
.create();

// Default OS disk encryption is platform-managed key.
Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId());
Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type());

// Key Vault + RSA key backing the disk encryption set.
String vaultName = generateRandomResourceName("vault", 15);
Vault vault = this.keyVaultManager.vaults()
    .define(vaultName)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .defineAccessPolicy()
        .forServicePrincipal(clientIdFromFile())
        .allowKeyPermissions(KeyPermissions.CREATE)
        .attach()
    .create();
String keyName = generateRandomResourceName("key", 15);
Key key = vault.keys()
    .define(keyName)
    .withKeyTypeToCreate(KeyType.RSA)
    .withKeySize(4096)
    .create();

// Disk encryption set using the customer-managed key.
String desName = generateRandomResourceName("des", 15);
DiskEncryptionSet des = this.computeManager.diskEncryptionSets()
    .define(desName)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY)
    .withExistingKeyVault(vault.id())
    .withExistingKey(key.id())
    .withSystemAssignedManagedServiceIdentity()
    .create();

// Grant the DES identity access to the key; purge protection is required for CMK.
vault.update()
    .defineAccessPolicy()
        .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId())
        .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY)
        .attach()
    .withPurgeProtectionEnabled()
    .apply();

// vm2: OS disk encrypted with the DES, detached (kept) when the VM is deleted.
String vm2Name = generateRandomResourceName("vm", 15);
VirtualMachine vm2 = this.computeManager.virtualMachines()
    .define(vm2Name)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withNewPrimaryNetwork("10.0.0.0/24")
.withPrimaryPrivateIPAddressDynamic()
.withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
.withRootUsername("jvuser")
.withSsh(sshPublicKey())
.withOSDiskDiskEncryptionSet(des.id())
.withOSDiskDeleteOptions(DeleteOptions.DETACH)
.create();

// Delete vm2 but keep its CMK-encrypted OS disk for the swap.
String vm2OSDiskId = vm2.osDiskId();
this.computeManager.virtualMachines().deleteById(vm2.id());
Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId);
Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type());

// Swap vm1's OS disk (must be deallocated) to vm2's encrypted disk.
vm1.deallocate();
vm1.update()
    .withOSDisk(vm2OSDiskId)
    .apply();
vm1.start();
vm1.refresh();
Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId);
Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()));

// Swap back to the original platform-key-encrypted disk.
vm1.deallocate();
vm1.update()
    .withOSDisk(vm1OSDisk)
    .apply();
vm1.start();
vm1.refresh();
Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id());
Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet());
}

/**
 * Verifies trusted launch VMs: secure boot + vTPM on create, and disabling both via
 * the async update/restart pipeline.
 */
@Test
public void canCRUDTrustedLaunchVM() {
    // Trusted launch requires a Gen2 image.
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withTrustedLaunch()
        .withSecureBoot()
        .withVTpm()
        .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
    Assertions.assertTrue(vm.isSecureBootEnabled());
    Assertions.assertTrue(vm.isVTpmEnabled());

    // Disable both flags, then restart so the change takes effect.
    vm.update()
        .withoutSecureBoot()
        .withoutVTpm()
        .applyAsync()
        .flatMap(VirtualMachine::restartAsync)
        .block();
// Give the restarted VM a moment to settle before re-reading its security profile.
ResourceManagerUtils.sleep(Duration.ofMinutes(1));
vm = computeManager.virtualMachines().getById(vm.id());
Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
Assertions.assertFalse(vm.isSecureBootEnabled());
Assertions.assertFalse(vm.isVTpmEnabled());
computeManager.virtualMachines().deleteById(vm.id());
}

/**
 * Verifies that DeleteOptions of the OS disk, data disks and NICs can be changed
 * after creation via VM update, individually and in bulk.
 */
@Test
public void canUpdateDeleteOptions() {
    String networkName = generateRandomResourceName("network", 15);
    String nicName = generateRandomResourceName("nic", 15);
    String nicName2 = generateRandomResourceName("nic", 15);
    Network network = this.networkManager.networks()
        .define(networkName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.1.0/24")
        .withSubnet("subnet1", "10.0.1.0/28")
        .withSubnet("subnet2", "10.0.1.16/28")
        .create();

    // Everything starts with DeleteOptions.DELETE: OS disk, data disk (LUN 1),
    // primary NIC and the secondary NIC created inline below.
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("subnet1")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE))
        .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2)
        .withNewSecondaryNetworkInterface(this.networkManager.networkInterfaces()
            .define(nicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet1")
            .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
    Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
DeleteOptions.DELETE.equals(disk.deleteOptions())));

// Flip OS disk, primary NIC and data disk (LUN 1) to DETACH; the secondary NIC
// keeps its original DELETE option.
vm.update()
    .withOsDiskDeleteOptions(DeleteOptions.DETACH)
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH)
    .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1)
    .apply();
Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions());
Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions());
Assertions.assertTrue(vm.networkInterfaceIds().stream()
    .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId ->
        DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
    DeleteOptions.DETACH.equals(disk.deleteOptions())));

// Attach a second secondary NIC and a new data disk while the VM is deallocated.
NetworkInterface secondaryNic2 = this.networkManager.networkInterfaces()
    .define(nicName2)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("subnet2")
    .withPrimaryPrivateIPAddressDynamic()
    .create();
vm.powerOff();
vm.deallocate();
vm.update()
    .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH))
    .withExistingSecondaryNetworkInterface(secondaryNic2)
    .apply();

// Bulk-update: set DELETE on the primary NIC, on ALL data disks, and on every
// non-primary NIC in one call.
vm.update()
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
    .withDataDisksDeleteOptions(DeleteOptions.DELETE,
        new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0]))
    .withNetworkInterfacesDeleteOptions(
        DeleteOptions.DELETE,
        vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new))
    .apply();
Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId ->
    DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
    DeleteOptions.DELETE.equals(disk.deleteOptions())));
}

/**
 * Verifies that listByVirtualMachineScaleSetId returns exactly the VMs attached to
 * the given flexible scale set and ignores standalone VMs in the same group.
 */
@Test
public void testListVmByVmssId() {
    String vmssName =
generateRandomResourceName("vmss", 15);
// NOTE: these locals intentionally shadow the class-level vmName field.
String vmName = generateRandomResourceName("vm", 15);
String vmName2 = generateRandomResourceName("vm", 15);
VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
    .define(vmssName)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withFlexibleOrchestrationMode()
    .create();
// Empty scale set -> no VMs listed for its id.
Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());

// vm joins the scale set; vm2 is standalone.
VirtualMachine vm = computeManager.virtualMachines()
    .define(vmName)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withNewPrimaryNetwork("10.0.0.0/28")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
    .withRootUsername("jvuser")
    .withSsh(sshPublicKey())
    .withExistingVirtualMachineScaleSet(vmss)
    .create();
Assertions.assertNotNull(vm.virtualMachineScaleSetId());
VirtualMachine vm2 = computeManager.virtualMachines()
    .define(vmName2)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withNewPrimaryNetwork("10.0.0.16/28")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
    .withRootUsername("jvuser")
    .withSsh(sshPublicKey())
    .create();
Assertions.assertNull(vm2.virtualMachineScaleSetId());

// Listing by scale-set id returns only the attached VM; listing by resource group
// returns both.
Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id()));
Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count());
}

/**
 * Exercises server-side paging (nextLink encoding) of listByVirtualMachineScaleSetId
 * with a 70-instance scale set. Disabled because the backend page size may change.
 */
@Test
@DoNotRecord(skipInPlayback = true)
@Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. Backend pageSize may change, so we don't want to assert that.")
public void testListByVmssIdNextLink() throws Exception {
    String vmssName = generateRandomResourceName("vmss", 15);
    String vnetName = generateRandomResourceName("vnet", 15);
    // NOTE: local vmName intentionally shadows the class field.
    String vmName = generateRandomResourceName("vm", 15);
    int vmssCapacity = 70;

    // A standalone VM that must NOT appear in the by-scale-set listing.
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .create();
    Network network = networkManager.networks().define(vnetName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("subnet1", "10.0.0.0/24")
        .create();
    LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region,
        this.resourceManager.resourceGroups().getByName(rgName), "1",
        LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);
    VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
        .withExistingPrimaryNetworkSubnet(network, "subnet1")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withCapacity(vmssCapacity)
        .create();

    // Walk the paged listing page-by-page, asserting each page's HTTP status.
    PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id());
    Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage();
    int pageCount = 0;
    for (PagedResponse<VirtualMachine> response : vmIterable) {
        pageCount++;
        Assertions.assertEquals(200, response.getStatusCode());
    }
    Assertions.assertEquals(vmssCapacity,
vmPaged.stream().count());
// 70 instances over the backend's page size currently yields 2 pages.
Assertions.assertEquals(2, pageCount);
}

/**
 * Builds a batch of {@code vmCount} VM creatables, each with its own new network and
 * public IP, all sharing one new resource group and one new storage account (VMs use
 * unmanaged disks). Also records the creatable keys of the networks and public IPs so
 * callers can look up the created resources after batch creation.
 */
private CreatablesInfo prepareCreatableVirtualMachines(
    Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
    Creatable<ResourceGroup> resourceGroupCreatable =
        resourceManager.resourceGroups().define(rgName).withRegion(region);
    Creatable<StorageAccount> storageAccountCreatable = storageManager.storageAccounts()
        .define(generateRandomResourceName("stg", 20))
        .withRegion(region)
        .withNewResourceGroup(resourceGroupCreatable);
    List<String> networkCreatableKeys = new ArrayList<>();
    List<String> publicIpCreatableKeys = new ArrayList<>();
    List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
    for (int i = 0; i < vmCount; i++) {
        // One network per VM; remember its creatable key for later lookup.
        Creatable<Network> networkCreatable = networkManager.networks()
            .define(String.format("%s-%d", networkNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable)
            .withAddressSpace("10.0.0.0/28");
        networkCreatableKeys.add(networkCreatable.key());
        // One public IP per VM.
        Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager.publicIpAddresses()
            .define(String.format("%s-%d", publicIpNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);
        publicIpCreatableKeys.add(publicIPAddressCreatable.key());
        Creatable<VirtualMachine> virtualMachineCreatable = computeManager.virtualMachines()
            .define(String.format("%s-%d", vmNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable)
            .withNewPrimaryNetwork(networkCreatable)
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("tirekicker")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks()
            .withNewStorageAccount(storageAccountCreatable);
        virtualMachineCreatables.add(virtualMachineCreatable);
    }
    CreatablesInfo creatablesInfo = new CreatablesInfo();
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
creatablesInfo.networkCreatableKeys = networkCreatableKeys;
creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
return creatablesInfo;
}

/**
 * Value holder returned by {@code prepareCreatableVirtualMachines}: the VM creatables
 * plus the creatable keys of the networks and public IPs backing them.
 *
 * <p>Declared {@code static}: it never touches the enclosing test instance, so there is
 * no reason to carry the implicit outer-instance reference of an inner class.
 */
static class CreatablesInfo {
    private List<Creatable<VirtualMachine>> virtualMachineCreatables;
    List<String> networkCreatableKeys;
    List<String> publicIpCreatableKeys;
}
}
/** Live/recorded tests for virtual machine CRUD and related networking resources. */
class VirtualMachineOperationsTests extends ComputeManagementTest {
    private String rgName = "";
    private String rgName2 = "";
    private final Region region = Region.US_EAST;
    private final Region regionProxPlacementGroup = Region.US_WEST;
    private final Region regionProxPlacementGroup2 = Region.US_EAST;
    private final String vmName = "javavm";
    private final String proxGroupName = "testproxgroup1";
    private final String proxGroupName2 = "testproxgroup2";
    private final String availabilitySetName = "availset1";
    private final String availabilitySetName2 = "availset2";
    private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;

    @Override
    protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
        rgName = generateRandomResourceName("javacsmrg", 15);
        rgName2 = generateRandomResourceName("javacsmrg2", 15);
        super.initializeClients(httpPipeline, profile);
    }

    @Override
    protected void cleanUpResources() {
        // rgName is set to null by tests that verified nothing was created
        if (rgName != null) {
            resourceManager.resourceGroups().beginDeleteByName(rgName);
        }
        // FIX: rgName2 is populated in initializeClients and used by
        // cannotUpdateProximityPlacementGroupForVirtualMachine (withNewResourceGroup(rgName2)),
        // but was never deleted here, leaking the resource group after a test run.
        if (rgName2 != null) {
            try {
                resourceManager.resourceGroups().beginDeleteByName(rgName2);
            } catch (ManagementException ignored) {
                // best-effort cleanup: most tests never create this group
            }
        }
    }

    // FIX: the annotation was duplicated ("@Test @Test"); JUnit 5's @Test is not
    // @Repeatable, so the duplicate annotation does not compile.
    @Test
    public void canCreateVirtualMachineWithNetworking() throws Exception {
        // NSG with a single inbound port-80 allow rule; asserted on again further below
        NetworkSecurityGroup nsg =
            this
                .networkManager
                .networkSecurityGroups()
                .define("nsg")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .defineRule("rule1")
                .allowInbound()
                .fromAnyAddress()
                .fromPort(80)
                .toAnyAddress()
                .toPort(80)
                .withProtocol(SecurityRuleProtocol.TCP)
                .attach()
                .create();
        // vnet whose only subnet is guarded by the NSG above
        Creatable<Network> networkDefinition =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/28")
                .defineSubnet("subnet1")
                .withAddressPrefix("10.0.0.0/29")
                .withExistingNetworkSecurityGroup(nsg)
                .attach();
        // create the VM on the secured subnet, with no public IP
        VirtualMachine vm =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork(networkDefinition)
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType());
        Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
        Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());
        try {
            // max price cannot be changed while the VM is running; apply() must throw
            foundVM.update().withMaxPrice(1500.0).apply();
            Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: service rejects a max-price update on a running VM (asserted via fail() above)
        }
        foundVM.deallocate();
        // once deallocated the price update is accepted
        foundVM.update().withMaxPrice(2000.0).apply();
        foundVM.start();
        Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
        // SPOT <-> LOW priority switches are allowed...
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
        try {
            // ...but switching back to REGULAR must be rejected
            foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
            Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: priority cannot be changed to REGULAR (asserted via fail() above)
        }
        computeManager.virtualMachines().deleteById(foundVM.id());
    }

    @Test
    public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
        // availability set #1, creating a new proximity placement group as a side effect
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();
        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = 
this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
Alternatively, use a fixed (non-random) string when the exact content of the user data is not important to the test.
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // User data must be supplied base64-encoded. Use Base64.Encoder#encodeToString:
    // base64 output is pure ASCII, and encodeToString avoids the platform-default
    // charset that new String(byte[]) would otherwise depend on.
    String userDataForCreate = Base64.getEncoder().encodeToString(
        UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8));
    String userDataForUpdate = Base64.getEncoder().encodeToString(
        UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8));

    // Create a Windows VM with user data attached at creation time.
    VirtualMachine vm = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();

    // Replace the user data in place on the existing VM.
    // NOTE(review): this test never asserts that the user data was actually applied;
    // consider fetching the VM via
    // computeManager.serviceClient().getVirtualMachines().getByResourceGroupWithResponse(
    //     rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE)
    // and asserting userData() after both create and update, as the companion test does.
    vm.update().withUserData(userDataForUpdate).apply();
}
UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8)));
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Fixed base64 payloads: the content is irrelevant, only round-tripping matters.
    String initialUserData = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String updatedUserData = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    // Provision a Windows VM with the initial user data attached.
    VirtualMachine virtualMachine = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(initialUserData)
        .create();

    // User data is only returned when explicitly requested via the USER_DATA expand,
    // so go through the inner service client rather than the fluent model.
    Response<VirtualMachineInner> afterCreate = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(initialUserData, afterCreate.getValue().userData());

    // Swap the user data on the existing VM and confirm the new value round-trips.
    virtualMachine.update().withUserData(updatedUserData).apply();

    Response<VirtualMachineInner> afterUpdate = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(updatedUserData, afterUpdate.getValue().userData());
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
        Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

        // Deleting vm1 should also delete its disks/NIC (all marked DELETE):
        // only 2 resources remain in the group (network + public IP).
        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(2,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        PublicIpAddress publicIpAddress =
            computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

        // vm2: OS disk, primary NIC and a secondary NIC all marked DELETE.
        String secondaryNicName = generateRandomResourceName("nic", 10);
        Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();
        VirtualMachine vm2 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        // After deleting vm2 only the network itself should remain.
        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(1,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        secondaryNetworkInterfaceCreatable =
this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();

        // vm3: no DeleteOptions specified anywhere — everything defaults to DETACH,
        // so disks and NICs survive the VM deletion (asserted below).
        VirtualMachine vm3 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

        // DETACH: the 3 disks (OS + 2 data) and 2 NICs remain after VM deletion.
        computeManager.virtualMachines().deleteById(vm3.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(2,
            computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
    }

    /**
     * Verifies DeleteOptions behavior for data disks added via update():
     * disks added on update do NOT inherit the create-time default.
     */
    @Test
    public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        Network network = this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();

        VirtualMachine vm1 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
.withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        // Add a data disk via update WITHOUT DeleteOptions: it defaults to DETACH,
        // so exactly 1 disk survives the VM deletion below.
        vm1.update()
            .withNewDataDisk(10)
            .apply();
        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.disks().deleteById(disk.id());

        // vm2: create-time data disk has no DELETE option; both the create-time and
        // the update-time disk end up DETACH-ed despite the update's default,
        // leaving 2 disks after deletion.
        VirtualMachine vm2 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        vm2.update()
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .apply();
        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
    }

    /** Verifies hibernation: enable at create, deallocate(hibernate=true), then disable via update. */
    @Test
    public void canHibernateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
            .enableHibernation()
            .create();
        Assertions.assertTrue(vm.isHibernationEnabled());

        // deallocate(true) requests hibernation; the instance view must then
        // report the "HibernationState/Hibernated" status.
        vm.deallocate(true);
        InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
            .filter(status -> "HibernationState/Hibernated".equals(status.code()))
            .findFirst().orElse(null);
        Assertions.assertNotNull(hibernationStatus);

        // Hibernation can only be toggled while the VM is deallocated.
        vm.start();
        vm.deallocate();
        vm.update()
            .disableHibernation()
            .apply();
        Assertions.assertFalse(vm.isHibernationEnabled());
    }

    /** Exercises the VM power-state lifecycle: redeploy, powerOff, start, restart, deallocate. */
    @Test
    public void canOperateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
        vm.redeploy();
        // powerOff(true) = skip graceful shutdown.
        vm.powerOff(true);
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.STOPPED, vm.powerState());
        vm.start();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
        vm.restart();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
        vm.deallocate();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
    }

    /** Verifies an ephemeral OS disk VM: cache placement, fixed OS disk id, and that deallocate fails. */
    @Test
    public void canCreateVirtualMachineWithEphemeralOSDisk() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
            .withEphemeralOSDisk()
            // Ephemeral OS disk is placed on the VM cache disk.
            .withPlacement(DiffDiskPlacement.CACHE_DISK)
            .withNewDataDisk(1, 1, CachingTypes.READ_WRITE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .create();
        Assertions.assertNull(vm.osDiskDiskEncryptionSetId());
        Assertions.assertTrue(vm.osDiskSize() > 0);
        // Ephemeral OS disks are always deleted with the VM and are READ_ONLY cached.
        Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE);
        Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY);
        Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks()));
        Assertions.assertTrue(vm.isOSDiskEphemeral());
        Assertions.assertNotNull(vm.osDiskId());
        String osDiskId = vm.osDiskId();

        // Data disks can still be swapped on an ephemeral-OS-disk VM.
        vm.update()
            .withoutDataDisk(1)
            .withNewDataDisk(1, 2, CachingTypes.NONE)
            .withNewDataDisk(1)
            .apply();
        Assertions.assertEquals(vm.dataDisks().size(), 2);
        vm.powerOff();
        vm.start();
        vm.refresh();
        // The ephemeral OS disk id survives a power cycle...
        Assertions.assertEquals(osDiskId, vm.osDiskId());
        // ...but deallocate is not supported for ephemeral OS disks.
        Assertions.assertThrows(Exception.class, vm::deallocate);
    }

    /**
     * Verifies attaching regular VMs to an existing scale set: allowed for
     * flexible orchestration, rejected for unmanaged disks and for uniform mode.
     */
    @Test
    public void canCreateVirtualMachineWithExistingScaleSet() throws Exception {
        final String vmssName = generateRandomResourceName("vmss", 10);
        Network network = this
            .networkManager
            .networks()
            .define("vmssvnet")
            .withRegion(region.name())
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/28")
            .withSubnet("subnet1", "10.0.0.0/28")
            .create();
        ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName);
        LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

        // Flexible-orchestration scale set with capacity 1.
        VirtualMachineScaleSet flexibleVMSS = this.computeManager
            .virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2)
            .withExistingPrimaryNetworkSubnet(network, "subnet1")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
.withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withCapacity(1)
            .withUpgradeMode(UpgradeMode.AUTOMATIC)
            .create();

        // A regular VM can join a flexible scale set; capacity goes 1 -> 2.
        String regularVMName = generateRandomResourceName("vm", 10);
        final String pipDnsLabel = generateRandomResourceName("pip", 10);
        VirtualMachine regularVM = this.computeManager
            .virtualMachines()
            .define(regularVMName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(pipDnsLabel)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("jvuser2")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(flexibleVMSS)
            .create();
        flexibleVMSS.refresh();
        Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId());
        Assertions.assertEquals(2, flexibleVMSS.capacity());

        regularVM.deallocate();
        Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED);

        // Deleting the VM shrinks the scale set back to capacity 1.
        this.computeManager
            .virtualMachines().deleteById(regularVM.id());
        flexibleVMSS.refresh();
        Assertions.assertEquals(flexibleVMSS.capacity(), 1);

        // A VM with unmanaged disks cannot join a scale set — service rejects it.
        final String storageAccountName = generateRandomResourceName("stg", 17);
        Assertions.assertThrows(
            ApiErrorException.class,
            () -> computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.1.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS")
                .withRootUsername("jvuser3")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .withNewStorageAccount(storageAccountName)
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .withExistingVirtualMachineScaleSet(flexibleVMSS)
                .create()
        );
        final String vmssName2 =
generateRandomResourceName("vmss", 10);
        Network network2 = this
            .networkManager
            .networks()
            .define("vmssvnet2")
            .withRegion(region.name())
            .withExistingResourceGroup(rgName)
            .withAddressSpace("192.168.0.0/28")
            .withSubnet("subnet2", "192.168.0.0/28")
            .create();
        LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

        // Uniform-orchestration scale set (no withFlexibleOrchestrationMode()).
        VirtualMachineScaleSet uniformVMSS = this.computeManager
            .virtualMachineScaleSets()
            .define(vmssName2)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
            .withExistingPrimaryNetworkSubnet(network2, "subnet2")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser4")
            .withSsh(sshPublicKey())
            .withCapacity(1)
            .create();
        Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null));

        // Regular VMs cannot join a UNIFORM scale set — the service must reject it.
        String regularVMName2 = generateRandomResourceName("vm", 10);
        Assertions.assertThrows(
            ApiErrorException.class,
            () -> this.computeManager
                .virtualMachines()
                .define(regularVMName2)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.1.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("jvuser5")
                .withSsh(sshPublicKey())
                .withExistingVirtualMachineScaleSet(uniformVMSS)
                .create()
        );
    }

    /**
     * Verifies swapping a VM's OS disk with another managed disk (including one
     * encrypted with a customer-managed key), then swapping back.
     */
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canSwapOSDiskWithManagedDisk() {
        String storageAccountName = generateRandomResourceName("sa", 15);
        StorageAccount storageAccount = this.storageManager
            .storageAccounts()
            .define(storageAccountName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .create();

        // vm1: platform-key-encrypted OS disk, kept (DETACH) on delete.
        String vm1Name = generateRandomResourceName("vm", 15);
        VirtualMachine vm1 = this.computeManager
.virtualMachines()
            .define(vm1Name)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10, 1, CachingTypes.READ_WRITE)
            .withOSDiskDeleteOptions(DeleteOptions.DETACH)
            .withExistingStorageAccount(storageAccount)
            .create();
        Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId());
        // Default managed-disk encryption is the platform key.
        Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type());

        // Build a Key Vault + RSA key + DiskEncryptionSet for customer-managed-key encryption.
        String vaultName = generateRandomResourceName("vault", 15);
        Vault vault = this.keyVaultManager
            .vaults()
            .define(vaultName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .defineAccessPolicy()
                .forServicePrincipal(clientIdFromFile())
                .allowKeyPermissions(KeyPermissions.CREATE)
                .attach()
            .create();
        String keyName = generateRandomResourceName("key", 15);
        Key key = vault.keys()
            .define(keyName)
            .withKeyTypeToCreate(KeyType.RSA)
            .withKeySize(4096)
            .create();
        String desName = generateRandomResourceName("des", 15);
        DiskEncryptionSet des = this.computeManager.diskEncryptionSets()
            .define(desName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY)
            .withExistingKeyVault(vault.id())
            .withExistingKey(key.id())
            .withSystemAssignedManagedServiceIdentity()
            .create();
        // Grant the DES identity access to the key; purge protection is required for CMK.
        vault.update()
            .defineAccessPolicy()
                .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId())
                .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY)
                .attach()
            .withPurgeProtectionEnabled()
            .apply();

        // vm2 exists only to produce a CMK-encrypted OS disk that survives its deletion.
        String vm2Name = generateRandomResourceName("vm", 15);
        VirtualMachine vm2 = this.computeManager.virtualMachines()
            .define(vm2Name)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
.withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withOSDiskDiskEncryptionSet(des.id())
            .withOSDiskDeleteOptions(DeleteOptions.DETACH)
            .create();
        String vm2OSDiskId = vm2.osDiskId();
        // DETACH keeps the disk alive after the VM is gone.
        this.computeManager.virtualMachines().deleteById(vm2.id());
        Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId);
        Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type());

        // Swap vm1's OS disk for vm2's CMK-encrypted disk (VM must be deallocated).
        vm1.deallocate();
        vm1.update()
            .withOSDisk(vm2OSDiskId)
            .apply();
        vm1.start();
        vm1.refresh();
        Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId);
        Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()));

        // Swap back to the original platform-key disk.
        vm1.deallocate();
        vm1.update()
            .withOSDisk(vm1OSDisk)
            .apply();
        vm1.start();
        vm1.refresh();
        Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id());
        Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet());
    }

    /** Verifies Trusted Launch VMs: secure boot + vTPM on create, both disabled via update. */
    @Test
    public void canCRUDTrustedLaunchVM() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withTrustedLaunch()
            .withSecureBoot()
            .withVTpm()
            .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .create();
        Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
        Assertions.assertTrue(vm.isSecureBootEnabled());
        Assertions.assertTrue(vm.isVTpmEnabled());

        // Disable both flags asynchronously, then restart for them to take effect.
        vm.update()
            .withoutSecureBoot()
            .withoutVTpm()
            .applyAsync()
            .flatMap(VirtualMachine::restartAsync)
            .block();
ResourceManagerUtils.sleep(Duration.ofMinutes(1));
        vm = computeManager.virtualMachines().getById(vm.id());
        Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
        Assertions.assertFalse(vm.isSecureBootEnabled());
        Assertions.assertFalse(vm.isVTpmEnabled());
        computeManager.virtualMachines().deleteById(vm.id());
    }

    /** Verifies that DeleteOptions on OS disk, data disks and NICs can be changed after creation. */
    @Test
    public void canUpdateDeleteOptions() {
        String networkName = generateRandomResourceName("network", 15);
        String nicName = generateRandomResourceName("nic", 15);
        String nicName2 = generateRandomResourceName("nic", 15);
        Network network = this
            .networkManager
            .networks()
            .define(networkName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.1.0/24")
            .withSubnet("subnet1", "10.0.1.0/28")
            .withSubnet("subnet2", "10.0.1.16/28")
            .create();

        // Everything starts as DELETE: OS disk, data disk, primary and secondary NIC.
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet1")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE))
            .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2)
            .withNewSecondaryNetworkInterface(this
                .networkManager
                .networkInterfaces()
                .define(nicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("subnet1")
                .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .create();
        Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
DeleteOptions.DELETE.equals(disk.deleteOptions())));

        // Flip OS disk, primary NIC and data disk 1 to DETACH; the secondary NIC
        // (not updated) must still report DELETE.
        vm.update()
            .withOsDiskDeleteOptions(DeleteOptions.DETACH)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH)
            .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1)
            .apply();
        Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.networkInterfaceIds().stream()
            .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId ->
                DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
            DeleteOptions.DETACH.equals(disk.deleteOptions())));

        // Attach a second data disk and a second secondary NIC while deallocated.
        NetworkInterface secondaryNic2 = this
            .networkManager
            .networkInterfaces()
            .define(nicName2)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet2")
            .withPrimaryPrivateIPAddressDynamic()
            .create();
        vm.powerOff();
        vm.deallocate();
        vm.update()
            .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH))
            .withExistingSecondaryNetworkInterface(secondaryNic2)
            .apply();

        // Bulk-update: set every data disk and every secondary NIC back to DELETE.
        vm.update()
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withDataDisksDeleteOptions(DeleteOptions.DELETE,
                new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0]))
            .withNetworkInterfacesDeleteOptions(
                DeleteOptions.DELETE,
                vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new))
            .apply();
        Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId ->
            DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
            DeleteOptions.DELETE.equals(disk.deleteOptions())));
    }

    /** Verifies listByVirtualMachineScaleSetId only returns VMs attached to the given scale set. */
    @Test
    public void testListVmByVmssId() {
        String vmssName =
generateRandomResourceName("vmss", 15);
        // NOTE(review): these locals shadow the class-level vmName field.
        String vmName = generateRandomResourceName("vm", 15);
        String vmName2 = generateRandomResourceName("vm", 15);
        VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .create();
        // Empty scale set -> empty listing.
        Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());

        // vm joins the scale set; vm2 is standalone.
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(vmss)
            .create();
        Assertions.assertNotNull(vm.virtualMachineScaleSetId());
        VirtualMachine vm2 = computeManager.virtualMachines()
            .define(vmName2)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.16/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .create();
        Assertions.assertNull(vm2.virtualMachineScaleSetId());

        // Only the attached VM shows up under the scale-set id, while the resource
        // group listing sees both.
        Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
        Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id()));
        Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count());
    }

    /**
     * Verifies nextLink paging of listByVirtualMachineScaleSetId with a 70-VM
     * flexible scale set (expects 2 pages with the service's current page size).
     */
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. Backend pageSize may change, so we don't want to assert that.")
    public void testListByVmssIdNextLink() throws Exception {
        String vmssName = generateRandomResourceName("vmss", 15);
        String vnetName = generateRandomResourceName("vnet", 15);
        String vmName = generateRandomResourceName("vm", 15);
        int vmssCapacity = 70;

        // A standalone VM outside the scale set — must NOT appear in the paged listing.
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .create();

        Network network = networkManager.networks().define(vnetName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("subnet1", "10.0.0.0/24")
            .create();
        LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region,
            this.resourceManager.resourceGroups().getByName(rgName), "1",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);
        VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
            .withExistingPrimaryNetworkSubnet(network, "subnet1")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withCapacity(vmssCapacity)
            .create();

        // Walk every page and check each underlying response succeeded.
        PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id());
        Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage();
        int pageCount = 0;
        for (PagedResponse<VirtualMachine> response : vmIterable) {
            pageCount++;
            Assertions.assertEquals(200, response.getStatusCode());
        }
        Assertions.assertEquals(vmssCapacity,
vmPaged.stream().count());
        Assertions.assertEquals(2, pageCount);
    }

    /**
     * Builds {@code vmCount} VM Creatable definitions, each with its own new
     * network and public IP, all sharing one resource group and storage account.
     * Returns the creatables together with the network/public-IP creatable keys
     * so callers can resolve the created resources after batch creation.
     */
    private CreatablesInfo prepareCreatableVirtualMachines(
        Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
        Creatable<ResourceGroup> resourceGroupCreatable =
            resourceManager.resourceGroups().define(rgName).withRegion(region);
        Creatable<StorageAccount> storageAccountCreatable = storageManager
            .storageAccounts()
            .define(generateRandomResourceName("stg", 20))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);
        List<String> networkCreatableKeys = new ArrayList<>();
        List<String> publicIpCreatableKeys = new ArrayList<>();
        List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
        for (int i = 0; i < vmCount; i++) {
            Creatable<Network> networkCreatable = networkManager
                .networks()
                .define(String.format("%s-%d", networkNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withAddressSpace("10.0.0.0/28");
            // Keys let the caller fetch the created resource from the batch result.
            networkCreatableKeys.add(networkCreatable.key());
            Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager
                .publicIpAddresses()
                .define(String.format("%s-%d", publicIpNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable);
            publicIpCreatableKeys.add(publicIPAddressCreatable.key());
            Creatable<VirtualMachine> virtualMachineCreatable = computeManager
                .virtualMachines()
                .define(String.format("%s-%d", vmNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withNewPrimaryNetwork(networkCreatable)
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("tirekicker")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withNewStorageAccount(storageAccountCreatable);
            virtualMachineCreatables.add(virtualMachineCreatable);
        }
        CreatablesInfo creatablesInfo = new CreatablesInfo();
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
        creatablesInfo.networkCreatableKeys = networkCreatableKeys;
        creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
        return creatablesInfo;
    }

    /** Simple holder pairing VM creatables with the keys of their network/public-IP creatables. */
    class CreatablesInfo {
        // VM definitions ready for batch creation.
        private List<Creatable<VirtualMachine>> virtualMachineCreatables;
        // Creatable keys used to look up the created networks after batch creation.
        List<String> networkCreatableKeys;
        // Creatable keys used to look up the created public IPs after batch creation.
        List<String> publicIpCreatableKeys;
    }
}
// Live/recorded integration tests for virtual machine CRUD operations.
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;
// Generates fresh resource-group names per test run before delegating to the base class.
@Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); }
// NOTE(review): only rgName is deleted here; rgName2 (used by the proximity-placement-group
// test to host availset2) is never cleaned up and will leak. TODO: confirm and delete rgName2
// here as well (behavior left unchanged to avoid breaking recorded sessions).
@Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } }
// FIX: the annotation was duplicated ("@Test @Test"), which does not compile because
// org.junit.jupiter.api.Test is not a repeatable annotation; a single @Test is the intended form.
// Creates a VM on a subnet guarded by an NSG and verifies the NSG is reachable both through
// the subnet and through the NIC's primary IP configuration (assertions continue on the next line).
@Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress()
// End of canCreateVirtualMachineWithNetworking: walks NIC -> IP config -> network -> subnet and
// asserts the same NSG ("nsg", one rule) is attached at both the subnet and the IP-config level.
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); }
// Creates a Windows VM (unmanaged disks, explicit OS-disk name/caching, Windows_Server license),
// then locates it both by listing the resource group and by direct get, asserting the region
// each time (continues on the next chunk line).
@Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region());
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
// Create-poll loop of canCreateVirtualMachineSyncPoll: sleeps for the service-suggested
// Retry-After (or a 10s default) between polls, asserts successful completion, then fetches the
// final VM and begins the delete LRO (its poll loop continues on the next chunk line).
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ?
// Delete-poll loop of canCreateVirtualMachineSyncPoll, then a 404-based check that the VM is gone.
// FIX: dropped the redundant narrowing "(int)" casts on toMillis(); delayInMills is a long and
// the matching create-poll loop above uses the long value directly — the int cast added a
// (theoretical) overflow hazard and served no purpose.
defaultDelayInMillis : acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); }
// Creates a low-priority VM with an eviction policy and max price, then exercises priority and
// price updates: a price change on a running VM is expected to fail, succeeds after deallocate,
// and switching to REGULAR priority is expected to fail (assertions continue on the next line).
@Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server",
// Tail of canCreateUpdatePriorityAndPrice. The empty catch blocks are intentional: the preceding
// apply() is expected to throw ManagementException, and Assertions.fail() guards the case where
// it does not.
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); }
// Creates an availability set inside a proximity placement group, a second set/PPG in another
// region (rgName2), places a VM in the first PPG, and verifies that moving a RUNNING VM to the
// second PPG is rejected by the service.
@Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue(
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } }
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView);
// FIX: was assertNotNull(instanceView.statuses().size() > 0) — the boolean autoboxes to a
// non-null Boolean, so the assertion could never fail; assertTrue expresses the intent.
Assertions.assertTrue(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
// FIX: added Assertions.fail() after apply() — without it the test silently passes if the
// forbidden update unexpectedly succeeds (the same try/fail/catch pattern the class already
// uses in canCreateUpdatePriorityAndPrice).
try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); Assertions.fail(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running."
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); }
// Creates an availability set in a PPG and a VM placed in the same PPG, then verifies both
// resources are reflected in the PPG's membership lists (continues on the next chunk line).
@Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
// Tail of the streaming test: blocks until the stream completes, then verifies each network and
// public IP exists, exactly one shared storage account was created, and the emitted-resource
// count matches the VM count.
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); }
// Creates a VM whose two unmanaged data disks (LUNs 2 and 3) are stored in an explicit premium
// storage account/container, records the generated VHD URIs, deletes the VM, then recreates a VM
// attaching the surviving VHDs as existing unmanaged disks — first at create time, then via update.
@Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id());
// Recreate the VM reusing the first VHD; deleting the VM leaves unmanaged VHDs behind in the
// storage account, so the blob is still available for attachment.
virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); }
// Verifies tag updates via withTag (single) and withTags (replace map) round-trip on the model.
@Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); }
// Runs a shell script (apt-get install git) on a Linux VM via the run-command API and checks a
// non-empty result payload comes back.
@Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); }
// Live-only test (skipInPlayback): creates a spot VM, triggers simulated eviction, and polls up
// to 30 minutes for the VM to reach DEALLOCATED (continues on the next chunk line).
@Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName)
// Body of canPerformSimulateEvictionOnSpotVirtualMachine: after eviction the VM model reports no
// OS-disk storage type and zero OS-disk size, and the OS disk transitions ATTACHED -> RESERVED.
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); }
// Force-deletes a VM (deleteById with forceDeletion=true) and verifies the VM itself returns 404
// while its NIC survives the force deletion.
@Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); }
// Exercises DeleteOptions on create: vm1 sets DELETE for OS disk, data disks (via default), NIC
// and public IP, so deleting the VM cascades to those resources (assertions continue below and
// on the next chunk line).
@Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = 
this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
Can we use a later windows version? 2012 seems too old.
public void canCreateAndUpdateVirtualMachineWithUserData() { String userDataForCreate = new String(Base64.getEncoder().encode( UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8))); String userDataForUpdate = new String(Base64.getEncoder().encode( UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8))); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withNewDataDisk(127) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withUserData(userDataForCreate) .create(); vm.update().withUserData(userDataForUpdate).apply(); }
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
public void canCreateAndUpdateVirtualMachineWithUserData() { String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4"; String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE"; VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2) .withAdminUsername("Foo12") .withAdminPassword(password()) .withNewDataDisk(127) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withUserData(userDataForCreate) .create(); Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines() .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE); Assertions.assertEquals(userDataForCreate, response.getValue().userData()); vm.update().withUserData(userDataForUpdate).apply(); response = computeManager.serviceClient().getVirtualMachines() .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE); Assertions.assertEquals(userDataForUpdate, response.getValue().userData()); }
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

// Deleting vm1 cascade-deletes the disks and primary NIC; only two resources
// (the network and the public IP, which was not flagged DELETE) remain.
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2,
    computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
PublicIpAddress publicIpAddress =
    computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

// vm2: secondary NIC attached with an explicit DeleteOptions.DELETE.
String secondaryNicName = generateRandomResourceName("nic", 10);
Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this.networkManager.networkInterfaces()
    .define(secondaryNicName)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic();

VirtualMachine vm2 = computeManager.virtualMachines()
    .define(vmName)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("testuser")
    .withSsh(sshPublicKey())
    .withOSDiskDeleteOptions(DeleteOptions.DELETE)
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
    .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
    .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
    .create();

// Everything flagged DELETE goes away with vm2; only the network remains.
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1,
    computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

// vm3: no DeleteOptions given anywhere -> everything defaults to DETACH.
secondaryNetworkInterfaceCreatable = this.networkManager.networkInterfaces()
    .define(secondaryNicName)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic();

VirtualMachine vm3 = computeManager.virtualMachines()
    .define(vmName)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("testuser")
    .withSsh(sshPublicKey())
    .withNewDataDisk(10)
    .withNewDataDisk(computeManager.disks()
        .define("datadisk2")
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withData()
        .withSizeInGB(10))
    .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
    .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
    .create();

Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

// With DETACH, disks (OS + 2 data) and both NICs survive the VM delete.
computeManager.virtualMachines().deleteById(vm3.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
Assertions.assertEquals(2,
    computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
}

// DeleteOptions at create time vs. disks added via update(): asserts below show which
// disks survive the VM delete in each combination.
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    Region region = Region.US_WEST3;
    Network network = this.networkManager.networks()
        .define("network1")
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("default", "10.0.0.0/24")
        .create();

    // vm1: create-time default delete option for data disks is DELETE.
    VirtualMachine vm1 = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
// continuation of the vm1 fluent create in canUpdateVirtualMachineWithDeleteOption
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("testuser")
    .withSsh(sshPublicKey())
    .withNewDataDisk(10)
    .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
    .withOSDiskDeleteOptions(DeleteOptions.DELETE)
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
    .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
    .create();

vm1.update()
    .withNewDataDisk(10)
    .apply();

// The disk added via update() does not pick up the create-time DELETE default:
// exactly one disk survives the VM delete.
computeManager.virtualMachines().deleteById(vm1.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
computeManager.disks().deleteById(disk.id());

// vm2: no create-time default; the update() below sets DELETE for the disk it adds.
VirtualMachine vm2 = computeManager.virtualMachines()
    .define(vmName)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("default")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
    .withRootUsername("testuser")
    .withSsh(sshPublicKey())
    .withNewDataDisk(10)
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
    .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
    .create();

vm2.update()
    .withNewDataDisk(10)
    .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
    .apply();

// The update-added disk (DELETE) is removed with the VM; the OS disk and the
// create-time data disk remain -> two disks left.
computeManager.virtualMachines().deleteById(vm2.id());
ResourceManagerUtils.sleep(Duration.ofSeconds(10));
Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}

// Hibernation: create with hibernation enabled, deallocate-and-hibernate, then
// disable hibernation via update().
@Test
public void canHibernateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
        .enableHibernation()
        .create();
    Assertions.assertTrue(vm.isHibernationEnabled());

    // deallocate(true): the instance view must then report HibernationState/Hibernated.
    vm.deallocate(true);
    InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
        .filter(status -> "HibernationState/Hibernated".equals(status.code()))
        .findFirst().orElse(null);
    Assertions.assertNotNull(hibernationStatus);

    vm.start();
    // VM is deallocated before toggling hibernation off (order appears required by the
    // service — not verified here).
    vm.deallocate();
    vm.update()
        .disableHibernation()
        .apply();
    Assertions.assertFalse(vm.isHibernationEnabled());
}

// Power-state lifecycle: redeploy, powerOff, start, restart, deallocate.
@Test
public void canOperateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.redeploy();

    vm.powerOff(true);
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.STOPPED, vm.powerState());

    vm.start();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.restart();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.deallocate();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
}

// Ephemeral OS disk placed on the cache disk; resize/deallocate restrictions asserted.
@Test
public void canCreateVirtualMachineWithEphemeralOSDisk() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
// continuation of the ephemeral-OS-disk VM fluent create
    .withRootUsername("Foo12")
    .withSsh(sshPublicKey())
    .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
    .withEphemeralOSDisk()
    .withPlacement(DiffDiskPlacement.CACHE_DISK)
    .withNewDataDisk(1, 1, CachingTypes.READ_WRITE)
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
    .create();

// The service reports DELETE delete-options and READ_ONLY caching for the ephemeral
// OS disk, and no disk encryption set.
Assertions.assertNull(vm.osDiskDiskEncryptionSetId());
Assertions.assertTrue(vm.osDiskSize() > 0);
Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE);
Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY);
Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks()));
Assertions.assertTrue(vm.isOSDiskEphemeral());
Assertions.assertNotNull(vm.osDiskId());
String osDiskId = vm.osDiskId();

// Data disks can still be added/removed on an ephemeral-OS-disk VM.
vm.update()
    .withoutDataDisk(1)
    .withNewDataDisk(1, 2, CachingTypes.NONE)
    .withNewDataDisk(1)
    .apply();
Assertions.assertEquals(vm.dataDisks().size(), 2);

// The OS disk id is stable across powerOff/start; deallocate throws for this VM.
vm.powerOff();
vm.start();
vm.refresh();
Assertions.assertEquals(osDiskId, vm.osDiskId());
Assertions.assertThrows(Exception.class, vm::deallocate);
}

// Standalone VMs attached to an existing flexible-orchestration VMSS; negative cases
// (unmanaged disks, uniform orchestration) must be rejected by the service.
@Test
public void canCreateVirtualMachineWithExistingScaleSet() throws Exception {
    final String vmssName = generateRandomResourceName("vmss", 10);
    Network network = this.networkManager.networks()
        .define("vmssvnet")
        .withRegion(region.name())
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/28")
        .withSubnet("subnet1", "10.0.0.0/28")
        .create();
    ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName);
    LoadBalancer publicLoadBalancer =
        createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

    // Flexible-orchestration scale set with a single instance.
    VirtualMachineScaleSet flexibleVMSS = this.computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2)
        .withExistingPrimaryNetworkSubnet(network, "subnet1")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withCapacity(1)
        .withUpgradeMode(UpgradeMode.AUTOMATIC)
        .create();

    String regularVMName = generateRandomResourceName("vm", 10);
    final String pipDnsLabel = generateRandomResourceName("pip", 10);

    // A standalone VM can join the flexible VMSS at create time; capacity grows to 2.
    VirtualMachine regularVM = this.computeManager.virtualMachines()
        .define(regularVMName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withNewPrimaryPublicIPAddress(pipDnsLabel)
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("jvuser2")
        .withSsh(sshPublicKey())
        .withExistingVirtualMachineScaleSet(flexibleVMSS)
        .create();

    flexibleVMSS.refresh();
    Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId());
    Assertions.assertEquals(2, flexibleVMSS.capacity());

    regularVM.deallocate();
    Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED);

    // Removing the VM shrinks the scale set capacity back to 1.
    this.computeManager.virtualMachines().deleteById(regularVM.id());
    flexibleVMSS.refresh();
    Assertions.assertEquals(flexibleVMSS.capacity(), 1);

    // Expected failure: an unmanaged-disk VM joining the flexible VMSS.
    final String storageAccountName = generateRandomResourceName("stg", 17);
    Assertions.assertThrows(
        ApiErrorException.class,
        () -> computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS")
            .withRootUsername("jvuser3")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withNewStorageAccount(storageAccountName)
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withExistingVirtualMachineScaleSet(flexibleVMSS)
            .create()
    );

    final String vmssName2 =
// continuation of canCreateVirtualMachineWithExistingScaleSet (assigns vmssName2)
generateRandomResourceName("vmss", 10);

Network network2 = this.networkManager.networks()
    .define("vmssvnet2")
    .withRegion(region.name())
    .withExistingResourceGroup(rgName)
    .withAddressSpace("192.168.0.0/28")
    .withSubnet("subnet2", "192.168.0.0/28")
    .create();
LoadBalancer publicLoadBalancer2 =
    createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

// Uniform-orchestration scale set for the negative case below.
VirtualMachineScaleSet uniformVMSS = this.computeManager.virtualMachineScaleSets()
    .define(vmssName2)
    .withRegion(region)
    .withNewResourceGroup(rgName)
    .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
    .withExistingPrimaryNetworkSubnet(network2, "subnet2")
    .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2)
    .withoutPrimaryInternalLoadBalancer()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
    .withRootUsername("jvuser4")
    .withSsh(sshPublicKey())
    .withCapacity(1)
    .create();
Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null));

String regularVMName2 = generateRandomResourceName("vm", 10);

// Expected failure: attaching a standalone VM to a uniform-orchestration VMSS.
Assertions.assertThrows(
    ApiErrorException.class,
    () -> this.computeManager.virtualMachines()
        .define(regularVMName2)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("jvuser5")
        .withSsh(sshPublicKey())
        .withExistingVirtualMachineScaleSet(uniformVMSS)
        .create()
);
}

// Swap a VM's OS disk with another (customer-key-encrypted) managed disk and back.
@Test
@DoNotRecord(skipInPlayback = true)
public void canSwapOSDiskWithManagedDisk() {
    String storageAccountName = generateRandomResourceName("sa", 15);
    StorageAccount storageAccount = this.storageManager.storageAccounts()
        .define(storageAccountName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .create();

    // vm1: OS disk encrypted with the platform key; kept on delete (DETACH).
    String vm1Name = generateRandomResourceName("vm", 15);
    VirtualMachine vm1 = this.computeManager.virtualMachines()
        .define(vm1Name)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10, 1, CachingTypes.READ_WRITE)
        .withOSDiskDeleteOptions(DeleteOptions.DETACH)
        .withExistingStorageAccount(storageAccount)
        .create();
    Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId());
    Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type());

    // Key vault + RSA key + disk encryption set for customer-managed-key encryption.
    String vaultName = generateRandomResourceName("vault", 15);
    Vault vault = this.keyVaultManager.vaults()
        .define(vaultName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .defineAccessPolicy()
            .forServicePrincipal(clientIdFromFile())
            .allowKeyPermissions(KeyPermissions.CREATE)
            .attach()
        .create();
    String keyName = generateRandomResourceName("key", 15);
    Key key = vault.keys()
        .define(keyName)
        .withKeyTypeToCreate(KeyType.RSA)
        .withKeySize(4096)
        .create();

    String desName = generateRandomResourceName("des", 15);
    DiskEncryptionSet des = this.computeManager.diskEncryptionSets()
        .define(desName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY)
        .withExistingKeyVault(vault.id())
        .withExistingKey(key.id())
        .withSystemAssignedManagedServiceIdentity()
        .create();

    // Grant the DES system identity access to the key.
    vault.update()
        .defineAccessPolicy()
            .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId())
            .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY)
            .attach()
        .withPurgeProtectionEnabled()
        .apply();

    // vm2 exists only to produce a customer-key-encrypted OS disk (DETACH on delete).
    String vm2Name = generateRandomResourceName("vm", 15);
    VirtualMachine vm2 = this.computeManager.virtualMachines()
        .define(vm2Name)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/24")
// continuation of the vm2 fluent create in canSwapOSDiskWithManagedDisk
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
    .withRootUsername("jvuser")
    .withSsh(sshPublicKey())
    .withOSDiskDiskEncryptionSet(des.id())
    .withOSDiskDeleteOptions(DeleteOptions.DETACH)
    .create();
String vm2OSDiskId = vm2.osDiskId();

// Delete vm2: its OS disk (DETACH) survives and is customer-key encrypted.
this.computeManager.virtualMachines().deleteById(vm2.id());
Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId);
Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type());

// Swap vm1's OS disk to vm2's disk (vm1 is deallocated first).
vm1.deallocate();
vm1.update()
    .withOSDisk(vm2OSDiskId)
    .apply();
vm1.start();
vm1.refresh();
Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId);
Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()));

// Swap back to the original OS disk; the encryption set reference is gone again.
vm1.deallocate();
vm1.update()
    .withOSDisk(vm1OSDisk)
    .apply();
vm1.start();
vm1.refresh();
Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id());
Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet());
}

// Trusted Launch VM with secure boot + vTPM enabled, then both disabled via update.
@Test
public void canCRUDTrustedLaunchVM() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withTrustedLaunch()
        .withSecureBoot()
        .withVTpm()
        .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
    Assertions.assertTrue(vm.isSecureBootEnabled());
    Assertions.assertTrue(vm.isVTpmEnabled());

    // Disable secure boot and vTPM, then restart the VM.
    vm.update()
        .withoutSecureBoot()
        .withoutVTpm()
        .applyAsync()
        .flatMap(VirtualMachine::restartAsync)
        .block();

    ResourceManagerUtils.sleep(Duration.ofMinutes(1));
    vm = computeManager.virtualMachines().getById(vm.id());
    Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
    Assertions.assertFalse(vm.isSecureBootEnabled());
    Assertions.assertFalse(vm.isVTpmEnabled());
    computeManager.virtualMachines().deleteById(vm.id());
}

// DeleteOptions can be changed after create, per disk and per NIC, via update().
@Test
public void canUpdateDeleteOptions() {
    String networkName = generateRandomResourceName("network", 15);
    String nicName = generateRandomResourceName("nic", 15);
    String nicName2 = generateRandomResourceName("nic", 15);
    Network network = this.networkManager.networks()
        .define(networkName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.1.0/24")
        .withSubnet("subnet1", "10.0.1.0/28")
        .withSubnet("subnet2", "10.0.1.16/28")
        .create();

    // Everything (OS disk, data disk, primary and secondary NIC) starts as DELETE.
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("subnet1")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE))
        .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2)
        .withNewSecondaryNetworkInterface(this.networkManager.networkInterfaces()
            .define(nicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet1")
            .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
    Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
// continuation of canUpdateDeleteOptions: predicate of the allMatch started above
    DeleteOptions.DELETE.equals(disk.deleteOptions())));

// Flip OS disk, primary NIC and data disk 1 to DETACH; the secondary NIC keeps DELETE.
vm.update()
    .withOsDiskDeleteOptions(DeleteOptions.DETACH)
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH)
    .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1)
    .apply();
Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions());
Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions());
Assertions.assertTrue(vm.networkInterfaceIds().stream()
    .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId ->
        DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
    DeleteOptions.DETACH.equals(disk.deleteOptions())));

NetworkInterface secondaryNic2 = this.networkManager.networkInterfaces()
    .define(nicName2)
    .withRegion(region)
    .withExistingResourceGroup(rgName)
    .withExistingPrimaryNetwork(network)
    .withSubnet("subnet2")
    .withPrimaryPrivateIPAddressDynamic()
    .create();

// Add a data disk (DETACH) and a second secondary NIC while deallocated.
vm.powerOff();
vm.deallocate();
vm.update()
    .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH))
    .withExistingSecondaryNetworkInterface(secondaryNic2)
    .apply();

// Bulk-flip every disk and NIC back to DELETE.
vm.update()
    .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
    .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0]))
    .withNetworkInterfacesDeleteOptions(
        DeleteOptions.DELETE,
        vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new))
    .apply();
Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId ->
    DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
    DeleteOptions.DELETE.equals(disk.deleteOptions())));
}

// listByVirtualMachineScaleSetId must only return VMs attached to the given VMSS.
@Test
public void testListVmByVmssId() {
    String vmssName = generateRandomResourceName("vmss", 15);
    // NOTE: this local shadows the class field of the same name.
    String vmName = generateRandomResourceName("vm", 15);
    String vmName2 = generateRandomResourceName("vm", 15);

    VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .create();
    Assertions.assertEquals(0,
        computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());

    // vm joins the scale set; vm2 does not.
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withExistingVirtualMachineScaleSet(vmss)
        .create();
    Assertions.assertNotNull(vm.virtualMachineScaleSetId());

    VirtualMachine vm2 = computeManager.virtualMachines()
        .define(vmName2)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.16/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .create();
    Assertions.assertNull(vm2.virtualMachineScaleSetId());

    // Only vm is returned for the VMSS id; both VMs are in the resource group.
    Assertions.assertEquals(1,
        computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
    Assertions.assertTrue(vm.id().equalsIgnoreCase(
        computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id()));
    Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count());
}

// Pagination (nextLink) behavior of listByVirtualMachineScaleSetId with a 70-instance VMSS.
@Test
@DoNotRecord(skipInPlayback = true)
@Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. Backend pageSize may change, so we don't want to assert that.")
public void testListByVmssIdNextLink() throws Exception {
    String vmssName = generateRandomResourceName("vmss", 15);
    String vnetName = generateRandomResourceName("vnet", 15);
    String vmName = generateRandomResourceName("vm", 15);
    int vmssCapacity = 70;

    // Standalone VM in the same group (not part of the VMSS) — presumably present to
    // verify the listing filters it out; confirm intent with the original author.
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .create();

    Network network = networkManager.networks().define(vnetName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("subnet1", "10.0.0.0/24")
        .create();
    LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region,
        this.resourceManager.resourceGroups().getByName(rgName), "1",
        LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

    VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
        .withExistingPrimaryNetworkSubnet(network, "subnet1")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withCapacity(vmssCapacity)
        .create();

    // Walk page by page: every page must be a 200, and the total must equal capacity.
    PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id());
    Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage();
    int pageCount = 0;
    for (PagedResponse<VirtualMachine> response : vmIterable) {
        pageCount++;
        Assertions.assertEquals(200, response.getStatusCode());
    }
    Assertions.assertEquals(vmssCapacity, vmPaged.stream().count());
    Assertions.assertEquals(2, pageCount);
}

// Builds linked creatables (shared RG + storage account, plus a network, public IP
// and VM per index) for batch-creation tests; returns them with the network/IP keys.
private CreatablesInfo prepareCreatableVirtualMachines(
    Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
    Creatable<ResourceGroup> resourceGroupCreatable =
        resourceManager.resourceGroups().define(rgName).withRegion(region);
    Creatable<StorageAccount> storageAccountCreatable = storageManager.storageAccounts()
        .define(generateRandomResourceName("stg", 20))
        .withRegion(region)
        .withNewResourceGroup(resourceGroupCreatable);

    List<String> networkCreatableKeys = new ArrayList<>();
    List<String> publicIpCreatableKeys = new ArrayList<>();
    List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
    for (int i = 0; i < vmCount; i++) {
        Creatable<Network> networkCreatable = networkManager.networks()
            .define(String.format("%s-%d", networkNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable)
            .withAddressSpace("10.0.0.0/28");
        networkCreatableKeys.add(networkCreatable.key());

        Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager.publicIpAddresses()
            .define(String.format("%s-%d", publicIpNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);
        publicIpCreatableKeys.add(publicIPAddressCreatable.key());

        Creatable<VirtualMachine> virtualMachineCreatable = computeManager.virtualMachines()
            .define(String.format("%s-%d", vmNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable)
            .withNewPrimaryNetwork(networkCreatable)
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("tirekicker")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks()
            .withNewStorageAccount(storageAccountCreatable);
        virtualMachineCreatables.add(virtualMachineCreatable);
    }
    CreatablesInfo creatablesInfo = new CreatablesInfo();
    creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
    creatablesInfo.networkCreatableKeys = networkCreatableKeys;
    creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
    return creatablesInfo;
}

// Simple holder for the creatables and their lookup keys produced above.
class CreatablesInfo {
    private List<Creatable<VirtualMachine>> virtualMachineCreatables;
    List<String> networkCreatableKeys;
    List<String> publicIpCreatableKeys;
}
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .create();
        // Walk primary NIC -> IP config -> network -> subnet and confirm the NSG
        // defined above is reachable from both the subnet and the IP configuration.
        NetworkInterface primaryNic = vm.getPrimaryNetworkInterface();
        Assertions.assertNotNull(primaryNic);
        NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration();
        Assertions.assertNotNull(primaryIpConfig);
        Assertions.assertNotNull(primaryIpConfig.networkId());
        Network network = primaryIpConfig.getNetwork();
        Assertions.assertNotNull(primaryIpConfig.subnetName());
        Subnet subnet = network.subnets().get(primaryIpConfig.subnetName());
        Assertions.assertNotNull(subnet);
        nsg = subnet.getNetworkSecurityGroup();
        Assertions.assertNotNull(nsg);
        Assertions.assertEquals("nsg", nsg.name());
        Assertions.assertEquals(1, nsg.securityRules().size());
        nsg = primaryIpConfig.getNetworkSecurityGroup();
        Assertions.assertEquals("nsg", nsg.name());
    }

    /** Creates a Windows VM with unmanaged disks and verifies list/get, region and license. */
    @Test
    public void canCreateVirtualMachine() throws Exception {
        // Create
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();
        // The VM must be discoverable both by listing the group and by direct get.
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());
        Assertions.assertNotNull(foundVM.timeCreated());
        PowerState powerState = foundVM.powerState();
        Assertions.assertEquals(powerState, PowerState.RUNNING);
        VirtualMachineInstanceView instanceView = foundVM.instanceView();
        Assertions.assertNotNull(instanceView);
        // NOTE(review): assertNotNull on a boolean expression is always non-null —
        // presumably assertTrue was intended; confirm before changing.
        Assertions.assertNotNull(instanceView.statuses().size() > 0);
        // Delete
        computeManager.virtualMachines().deleteById(foundVM.id());
    }

    /**
     * Verifies that beginCreate() rejects a definition whose extension forces an
     * additional (non-LRO-chainable) call, and that no dependent resources get created.
     */
    @Test
    public void cannotCreateVirtualMachineSyncPoll() throws Exception {
        // NOTE(review): this string literal appears truncated ("https:" with no URL and no
        // closing quote before the next statement) — looks like the remainder of the URL was
        // lost (possibly stripped as a "//" comment); restore the original script URL.
        final String mySqlInstallScript = "https:
        final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x(";
        Assertions.assertThrows(IllegalStateException.class, () -> {
            Accepted<VirtualMachine> acceptedVirtualMachine =
                this.computeManager.virtualMachines()
                    .define(vmName)
                    .withRegion(region)
                    .withNewResourceGroup(rgName)
                    .withNewPrimaryNetwork("10.0.0.0/28")
                    .withPrimaryPrivateIPAddressDynamic()
                    .withoutPrimaryPublicIPAddress()
                    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                    .withRootUsername("Foo12")
                    .withSsh(sshPublicKey())
                    .defineNewExtension("CustomScriptForLinux")
                    .withPublisher("Microsoft.OSTCExtensions")
                    .withType("CustomScriptForLinux")
                    .withVersion("1.4")
                    .withMinorVersionAutoUpgrade()
                    .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript))
                    .withPublicSetting("commandToExecute", installCommand)
                    .attach()
                    .beginCreate();
        });
        boolean dependentResourceCreated =
            computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName);
        Assertions.assertFalse(dependentResourceCreated);
        // The group was never created, so skip deletion in cleanUpResources().
        rgName = null;
    }

    /** Exercises the manual (synchronous) polling path of beginCreate/beginDelete. */
    @Test
    public void canCreateVirtualMachineSyncPoll() throws Exception {
        final long defaultDelayInMillis = 10 * 1000;
        Accepted<VirtualMachine> acceptedVirtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
                .withAdminUsername("Foo12")
                .withAdminPassword(password())
                .withUnmanagedDisks()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .withOSDiskName("javatest")
                .withLicenseType("Windows_Server")
                .beginCreate();
        // The activation response is the not-yet-provisioned resource.
        VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue();
        Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState());
        // Poll manually, honoring Retry-After when the service supplies one.
        LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus();
        long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null
            ? defaultDelayInMillis
            : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis();
        while (!pollStatus.isComplete()) {
            ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
            PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll();
            pollStatus = pollResponse.getStatus();
            delayInMills = pollResponse.getRetryAfter() == null
                ? defaultDelayInMillis
                : pollResponse.getRetryAfter().toMillis();
        }
        Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus);
        VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult();
        Assertions.assertEquals("Succeeded", virtualMachine.provisioningState());
        // Same manual polling for the delete LRO.
        Accepted<Void> acceptedDelete = computeManager.virtualMachines()
            .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name());
        pollStatus = acceptedDelete.getActivationResponse().getStatus();
        delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null
            ? defaultDelayInMillis
            : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
        while (!pollStatus.isComplete()) {
            ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
            PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
            pollStatus = pollResponse.getStatus();
            delayInMills = pollResponse.getRetryAfter() == null
                ? defaultDelayInMillis
                : (int) pollResponse.getRetryAfter().toMillis();
        }
        // A 404 with NotFound/ResourceNotFound proves the VM is really gone.
        boolean deleted = false;
        try {
            computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException e) {
            if (e.getResponse().getStatusCode() == 404
                && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) {
                deleted = true;
            }
        }
        Assertions.assertTrue(deleted);
    }

    /** Covers Spot/low-priority billing: max price updates (only while deallocated) and priority changes. */
    @Test
    public void canCreateUpdatePriorityAndPrice() throws Exception {
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
            .withMaxPrice(1000.0)
            .withLicenseType("Windows_Server")
            .create();
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        Assertions.assertEquals("Windows_Server",
            foundVM.licenseType());
        Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
        Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());
        // Changing max price while the VM is running is expected to be rejected.
        try {
            foundVM.update().withMaxPrice(1500.0).apply();
            Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: max price can only change while deallocated
        }
        // After deallocation the price update succeeds.
        foundVM.deallocate();
        foundVM.update().withMaxPrice(2000.0).apply();
        foundVM.start();
        Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
        // SPOT <-> LOW priority transitions are allowed; REGULAR is not.
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
        try {
            foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
            Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: cannot convert a Spot/low-priority VM back to regular in place
        }
        computeManager.virtualMachines().deleteById(foundVM.id());
    }

    /** Verifies a running VM's proximity placement group cannot be swapped for another. */
    @Test
    public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
        // First availability set + PPG in one region/group.
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();
        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
        // Second availability set + PPG in a different region/group (the swap target).
        // Note: rgName2 is created here but not removed by cleanUpResources().
        AvailabilitySet setCreated2 =
            computeManager
                .availabilitySets()
                .define(availabilitySetName2)
                .withRegion(regionProxPlacementGroup2)
                .withNewResourceGroup(rgName2)
                .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
                .create();
        Assertions.assertEquals(availabilitySetName2, setCreated2.name());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());
        // VM pinned to the first PPG.
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(regionProxPlacementGroup)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());
        PowerState powerState = foundVM.powerState();
        Assertions.assertEquals(powerState, PowerState.RUNNING);
        VirtualMachineInstanceView instanceView = foundVM.instanceView();
        Assertions.assertNotNull(instanceView);
        // NOTE(review): assertNotNull on a boolean — presumably assertTrue was intended.
        Assertions.assertNotNull(instanceView.statuses().size() > 0);
        // The PPG must list both the availability set and the VM as members.
        Assertions.assertNotNull(foundVM.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
        Assertions
            .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
        // Swapping the PPG on a running VM must be rejected by the service.
        try {
            VirtualMachine updatedVm =
                foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply();
        } catch (ManagementException clEx) {
            Assertions
                .assertTrue(
                    clEx
                        .getMessage()
                        .contains(
                            "Updating proximity placement group of VM javavm is not allowed while the VM is running."
                                + " Please stop/deallocate the VM and retry the operation."));
        }
        computeManager.virtualMachines().deleteById(foundVM.id());
        computeManager.availabilitySets().deleteById(setCreated.id());
    }

    /** Verifies a VM and an availability set can share one proximity placement group. */
    @Test
    public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();
        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(regionProxPlacementGroup)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if
                (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());
        PowerState powerState = foundVM.powerState();
        Assertions.assertEquals(powerState, PowerState.RUNNING);
        VirtualMachineInstanceView instanceView = foundVM.instanceView();
        Assertions.assertNotNull(instanceView);
        // NOTE(review): assertNotNull on a boolean — presumably assertTrue was intended.
        Assertions.assertNotNull(instanceView.statuses().size() > 0);
        // Both the availability set and the VM must be members of the shared PPG.
        Assertions.assertNotNull(foundVM.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
        Assertions
            .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
        // Removing the PPG from the VM leaves the availability set's membership intact.
        VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply();
        Assertions.assertNotNull(updatedVm.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));
        computeManager.virtualMachines().deleteById(foundVM.id());
        computeManager.availabilitySets().deleteById(setCreated.id());
    }

    /** Batch-creates several VMs plus their networks/IPs and checks every created resource. */
    @Test
    public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;
        CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
        List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;
        // Single batch create provisions all VMs and their dependencies in parallel.
        CreatedResources<VirtualMachine> createdVirtualMachines =
            computeManager.virtualMachines().create(virtualMachineCreatables);
        Assertions.assertTrue(createdVirtualMachines.size() == count);
        Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
            Assertions.assertNotNull(virtualMachine.id());
        }
        // Related resources are retrievable through the creatable keys recorded earlier.
        Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        for (String networkCreatableKey : networkCreatableKeys) {
            Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
            Assertions.assertNotNull(createdNetwork);
            Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
        }
        Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }
        for (String publicIpCreatableKey : publicIpCreatableKeys) {
            PublicIpAddress createdPublicIpAddress =
                (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
            Assertions.assertNotNull(createdPublicIpAddress);
            Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
        }
    }

    /** Streams resources emitted by the reactive batch-create API and validates each one. */
    @Test
    public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;
        final Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        final Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        final Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }
        final CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        final AtomicInteger resourceCount = new AtomicInteger(0);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        // createAsync emits each resource (VM, network, public IP) as it is provisioned.
        computeManager
            .virtualMachines()
            .createAsync(virtualMachineCreatables)
            .map(
                createdResource -> {
                    if (createdResource instanceof Resource) {
                        Resource resource = (Resource) createdResource;
                        System.out.println("Created: " + resource.id());
                        if (resource instanceof VirtualMachine) {
                            VirtualMachine virtualMachine = (VirtualMachine) resource;
                            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
                            Assertions.assertNotNull(virtualMachine.id());
                        } else if (resource instanceof Network) {
                            Network network = (Network) resource;
                            Assertions.assertTrue(networkNames.contains(network.name()));
                            Assertions.assertNotNull(network.id());
                        } else if (resource instanceof PublicIpAddress) {
                            PublicIpAddress publicIPAddress = (PublicIpAddress) resource;
                            Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
                            Assertions.assertNotNull(publicIPAddress.id());
                        }
                    }
                    resourceCount.incrementAndGet();
                    return createdResource;
                })
            .blockLast();
        // Every side resource must exist in the resource group after the stream completes.
        networkNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
        });
        publicIPAddressNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
        });
        Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
        // NOTE(review): resourceCount counts only VM emissions here? It is compared to
        // 'count', which matches only if the stream maps once per VM — confirm emission
        // semantics of createAsync.
        Assertions.assertEquals(count, resourceCount.get());
    }

    /** Verifies unmanaged data disks can be placed in a caller-chosen storage account/container. */
    @Test
    public void canSetStorageAccountForUnmanagedDisk() {
        final String storageName = generateRandomResourceName("st", 14);
        // Premium storage account that will hold the data-disk VHDs.
        StorageAccount storageAccount =
            storageManager
                .storageAccounts()
                .define(storageName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withSku(StorageAccountSkuType.PREMIUM_LRS)
                .create();
        // Creates a VM with two unmanaged data disks stored at explicit VHD paths.
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .defineUnmanagedDataDisk("disk1")
                .withNewVhd(100)
                .withLun(2)
                .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                .attach()
                .defineUnmanagedDataDisk("disk2")
                .withNewVhd(100)
                .withLun(3)
                .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
                .attach()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .create();
        Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
        // Disks are keyed by LUN (2 and 3 as defined above).
        VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
        Assertions.assertNotNull(firstUnmanagedDataDisk);
        VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
        Assertions.assertNotNull(secondUnmanagedDataDisk);
        String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
        String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
        Assertions.assertNotNull(createdVhdUri1);
        Assertions.assertNotNull(createdVhdUri2);
        // Deleting the VM leaves the VHDs behind; re-attach one to a new VM.
        computeManager.virtualMachines().deleteById(virtualMachine.id());
        virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .create();
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(1, unmanagedDataDisks.size());
        firstUnmanagedDataDisk = null;
        for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
            firstUnmanagedDataDisk = unmanagedDisk;
            break;
        }
        // The re-attached disk must point at the same VHD created earlier.
        Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
        Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
        // The second leftover VHD can be attached via update as well.
        virtualMachine
            .update()
            .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
            .apply();
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
    }

    /** Verifies tag updates via withTag and withTags on an existing VM. */
    @Test
    public void canUpdateTagsOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();
        // Add a single tag.
        virtualMachine.update().withTag("test", "testValue").apply();
        Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));
        // Replace the tag map wholesale.
        Map<String, String> testTags = new HashMap<String, String>();
        testTags.put("testTag", "testValue");
        virtualMachine.update().withTags(testTags).apply();
        Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
    }

    /** Runs a shell script through the run-command API and checks output is returned. */
    @Test
    public void canRunScriptOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .create();
        List<String> installGit = new ArrayList<>();
        installGit.add("sudo apt-get update");
        installGit.add("sudo apt-get install -y git");
        RunCommandResult runResult =
            virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>());
        Assertions.assertNotNull(runResult);
        Assertions.assertNotNull(runResult.value());
        Assertions.assertTrue(runResult.value().size() > 0);
    }

    /** Simulates eviction of a Spot VM and verifies it ends up deallocated with a reserved disk. */
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canPerformSimulateEvictionOnSpotVirtualMachine() {
        VirtualMachine virtualMachine =
            computeManager.virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();
        Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
        Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertNotNull(disk);
        Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());
        // Trigger the eviction, then poll (up to ~30 min) until the VM deallocates.
        virtualMachine.simulateEviction();
        boolean deallocated = false;
        int pollIntervalInMinutes = 5;
        for (int i = 0; i < 30; i += pollIntervalInMinutes) {
            ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
            if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
                deallocated = true;
                break;
            }
        }
        Assertions.assertTrue(deallocated);
        // After eviction the OS-disk properties are cleared and the disk is RESERVED.
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertEquals(0, virtualMachine.osDiskSize());
        disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
    }

    /** Force-deletes a VM and verifies the NIC survives (force delete skips dependent cleanup). */
    @Test
    public void canForceDeleteVirtualMachine() {
        // Create a VM in the eastus2euap canary region.
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .create();
        VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
        String nicId = virtualMachine.primaryNetworkInterfaceId();
        // Force delete (second argument = true).
        computeManager.virtualMachines().deleteById(virtualMachine.id(), true);
        // The VM must be gone (404) ...
        try {
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException ex) {
            virtualMachine = null;
            Assertions.assertEquals(404, ex.getResponse().getStatusCode());
        }
        Assertions.assertNull(virtualMachine);
        // ... but the NIC must still exist.
        NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
        Assertions.assertNotNull(nic);
    }

    /** Verifies DeleteOptions.DELETE cascades deletion of disks, NICs and (optionally) public IPs. */
    @Test
    public void canCreateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
        Network network =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/24")
                .withSubnet("default", "10.0.0.0/24")
                .create();
        // VM 1: everything marked DELETE except the public IP (commented-out option).
        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = 
this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
1. The value of `userData` changed to two different non-random strings. 2. The version of windows virtual machine image changed to `2019-datacenter-gensecond`.
public void canCreateAndUpdateVirtualMachineWithUserData() { String userDataForCreate = new String(Base64.getEncoder().encode( UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8))); String userDataForUpdate = new String(Base64.getEncoder().encode( UUID.randomUUID().toString().toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8))); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withNewDataDisk(127) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withUserData(userDataForCreate) .create(); vm.update().withUserData(userDataForUpdate).apply(); }
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
public void canCreateAndUpdateVirtualMachineWithUserData() { String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4"; String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE"; VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2) .withAdminUsername("Foo12") .withAdminPassword(password()) .withNewDataDisk(127) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withUserData(userDataForCreate) .create(); Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines() .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE); Assertions.assertEquals(userDataForCreate, response.getValue().userData()); vm.update().withUserData(userDataForUpdate).apply(); response = computeManager.serviceClient().getVirtualMachines() .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE); Assertions.assertEquals(userDataForUpdate, response.getValue().userData()); }
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = 
this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
// (continuation of canHibernateVirtualMachine: finish building the Windows VM
// with hibernation enabled)
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
    .withAdminUsername("Foo12")
    .withAdminPassword(password())
    .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
    .enableHibernation()
    .create();
Assertions.assertTrue(vm.isHibernationEnabled());

// deallocate(true) requests hibernation instead of a plain deallocate; the
// instance view should then carry the "HibernationState/Hibernated" status.
vm.deallocate(true);
InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
    .filter(status -> "HibernationState/Hibernated".equals(status.code()))
    .findFirst().orElse(null);
Assertions.assertNotNull(hibernationStatus);

vm.start();
vm.deallocate();

// Hibernation support is toggled while the VM is deallocated.
vm.update()
    .disableHibernation()
    .apply();
Assertions.assertFalse(vm.isHibernationEnabled());
}

// Exercises the VM power-state lifecycle: redeploy, powerOff, start, restart,
// deallocate — asserting the expected PowerState after each transition.
@Test
public void canOperateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.redeploy();

    // powerOff(true) skips graceful shutdown (hard stop).
    vm.powerOff(true);
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.STOPPED, vm.powerState());

    vm.start();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.restart();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.deallocate();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
}

// Verifies creation of a VM with an ephemeral OS disk (placed on the cache
// disk); the test later asserts such a VM cannot be deallocated.
@Test
public void canCreateVirtualMachineWithEphemeralOSDisk() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
/** Live/recorded tests for VirtualMachine create, update, delete and related operations. */
class VirtualMachineOperationsTests extends ComputeManagementTest {
    private String rgName = "";
    private String rgName2 = "";
    private final Region region = Region.US_EAST;
    private final Region regionProxPlacementGroup = Region.US_WEST;
    private final Region regionProxPlacementGroup2 = Region.US_EAST;
    private final String vmName = "javavm";
    private final String proxGroupName = "testproxgroup1";
    private final String proxGroupName2 = "testproxgroup2";
    private final String availabilitySetName = "availset1";
    private final String availabilitySetName2 = "availset2";
    private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;

    @Override
    protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
        // Fresh resource-group names for every test run/recording.
        rgName = generateRandomResourceName("javacsmrg", 15);
        rgName2 = generateRandomResourceName("javacsmrg2", 15);
        super.initializeClients(httpPipeline, profile);
    }

    @Override
    protected void cleanUpResources() {
        // rgName is set to null by tests that created nothing, so skip deletion then.
        if (rgName != null) {
            resourceManager.resourceGroups().beginDeleteByName(rgName);
        }
        // NOTE(review): rgName2 is never deleted here — confirm whether tests that create
        // resources in rgName2 leak the resource group.
    }

    // FIX: the annotation was written twice ("@Test @Test"); JUnit 5's @Test is not
    // @Repeatable, so the duplicate is a compile error. Exactly one @Test is kept.
    @Test
    public void canCreateVirtualMachineWithNetworking() throws Exception {
        // NSG with a single inbound port-80 rule, attached to the subnet below.
        NetworkSecurityGroup nsg =
            this
                .networkManager
                .networkSecurityGroups()
                .define("nsg")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .defineRule("rule1")
                    .allowInbound()
                    .fromAnyAddress()
                    .fromPort(80)
                    .toAnyAddress()
                    .toPort(80)
                    .withProtocol(SecurityRuleProtocol.TCP)
                    .attach()
                .create();

        Creatable<Network> networkDefinition =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/28")
                .defineSubnet("subnet1")
                    .withAddressPrefix("10.0.0.0/29")
                    .withExistingNetworkSecurityGroup(nsg)
                    .attach();

        // Create a Linux VM on that network, no public IP.
        VirtualMachine vm =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork(networkDefinition)
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .create();

        // Verify the VM is wired to the network, subnet and NSG defined above.
        NetworkInterface primaryNic = vm.getPrimaryNetworkInterface();
        Assertions.assertNotNull(primaryNic);
        NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration();
        Assertions.assertNotNull(primaryIpConfig);
        Assertions.assertNotNull(primaryIpConfig.networkId());
        Network network = primaryIpConfig.getNetwork();
        Assertions.assertNotNull(primaryIpConfig.subnetName());
        Subnet subnet = network.subnets().get(primaryIpConfig.subnetName());
        Assertions.assertNotNull(subnet);
        // NSG should be reachable both through the subnet and the IP configuration.
        nsg = subnet.getNetworkSecurityGroup();
        Assertions.assertNotNull(nsg);
        Assertions.assertEquals("nsg", nsg.name());
        Assertions.assertEquals(1, nsg.securityRules().size());
        nsg = primaryIpConfig.getNetworkSecurityGroup();
        Assertions.assertEquals("nsg", nsg.name());
    }

    /** Creates a Windows VM with unmanaged disks and verifies list/get round-trips. */
    @Test
    public void canCreateVirtualMachine() throws Exception {
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();

        // Find the VM both by listing the resource group and by direct get.
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());
        Assertions.assertNotNull(foundVM.timeCreated());

        PowerState powerState = foundVM.powerState();
        Assertions.assertEquals(powerState, PowerState.RUNNING);
        VirtualMachineInstanceView instanceView = foundVM.instanceView();
        Assertions.assertNotNull(instanceView);
        // NOTE(review): assertNotNull on a boolean expression is always true — likely meant
        // assertTrue(instanceView.statuses().size() > 0); confirm intent before changing.
        Assertions.assertNotNull(instanceView.statuses().size() > 0);
        computeManager.virtualMachines().deleteById(foundVM.id());
    }

    /**
     * beginCreate on a definition whose dependent resources cannot be created synchronously
     * must throw IllegalStateException and leave nothing behind in Azure.
     */
    @Test
    public void cannotCreateVirtualMachineSyncPoll() throws Exception {
        // NOTE(review): this literal appears truncated by source extraction — the original
        // script URL text seems to have been lost; confirm against upstream before editing.
        final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x(";
        Assertions.assertThrows(IllegalStateException.class, () -> {
            Accepted<VirtualMachine> acceptedVirtualMachine =
                this.computeManager.virtualMachines()
                    .define(vmName)
                    .withRegion(region)
                    .withNewResourceGroup(rgName)
                    .withNewPrimaryNetwork("10.0.0.0/28")
                    .withPrimaryPrivateIPAddressDynamic()
                    .withoutPrimaryPublicIPAddress()
                    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                    .withRootUsername("Foo12")
                    .withSsh(sshPublicKey())
                    .defineNewExtension("CustomScriptForLinux")
                        .withPublisher("Microsoft.OSTCExtensions")
                        .withType("CustomScriptForLinux")
                        .withVersion("1.4")
                        .withMinorVersionAutoUpgrade()
                        .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript))
                        .withPublicSetting("commandToExecute", installCommand)
                        .attach()
                    .beginCreate();
        });
        // Nothing — not even the resource group — should have been created.
        boolean dependentResourceCreated =
            computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName);
        Assertions.assertFalse(dependentResourceCreated);
        // Skip cleanup since nothing exists.
        rgName = null;
    }

    /** Creates and deletes a VM via manual long-running-operation (LRO) sync polling. */
    @Test
    public void canCreateVirtualMachineSyncPoll() throws Exception {
        final long defaultDelayInMillis = 10 * 1000;

        Accepted<VirtualMachine> acceptedVirtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
                .withAdminUsername("Foo12")
                .withAdminPassword(password())
                .withUnmanagedDisks()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .withOSDiskName("javatest")
                .withLicenseType("Windows_Server")
                .beginCreate();

        // Activation response exposes the in-progress resource; provisioning is not yet done.
        VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue();
        Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState());

        // Poll manually until the LRO completes, honoring server-provided Retry-After.
        LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus();
        long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null
            ? defaultDelayInMillis
            : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis();
        while (!pollStatus.isComplete()) {
            ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
            PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll();
            pollStatus = pollResponse.getStatus();
            delayInMills = pollResponse.getRetryAfter() == null
                ? defaultDelayInMillis
                : pollResponse.getRetryAfter().toMillis();
        }
        Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus);
        VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult();
        Assertions.assertEquals("Succeeded", virtualMachine.provisioningState());

        // Same sync-poll flow for deletion.
        Accepted<Void> acceptedDelete = computeManager.virtualMachines()
            .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name());
        pollStatus = acceptedDelete.getActivationResponse().getStatus();
        delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null
            ? defaultDelayInMillis
            : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
        while (!pollStatus.isComplete()) {
            ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
            PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
            pollStatus = pollResponse.getStatus();
            delayInMills = pollResponse.getRetryAfter() == null
                ? defaultDelayInMillis
                : (int) pollResponse.getRetryAfter().toMillis();
        }

        // Deleted VM lookup must fail with 404 NotFound/ResourceNotFound.
        boolean deleted = false;
        try {
            computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException e) {
            if (e.getResponse().getStatusCode() == 404
                && ("NotFound".equals(e.getValue().getCode())
                    || "ResourceNotFound".equals(e.getValue().getCode()))) {
                deleted = true;
            }
        }
        Assertions.assertTrue(deleted);
    }

    /** Exercises low/spot priority, eviction policy and max-price update rules. */
    @Test
    public void canCreateUpdatePriorityAndPrice() throws Exception {
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
            .withMaxPrice(1000.0)
            .withLicenseType("Windows_Server")
            .create();

        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        Assertions.assertEquals("Windows_Server",
            foundVM.licenseType());
        Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
        Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());

        // Max-price update on a running VM is expected to be rejected by the service.
        try {
            foundVM.update().withMaxPrice(1500.0).apply();
            Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: price change rejected while VM is running
        }
        // After deallocating, the price update succeeds.
        foundVM.deallocate();
        foundVM.update().withMaxPrice(2000.0).apply();
        foundVM.start();
        Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
        foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
        try {
            // Switching a spot/low VM back to REGULAR is expected to fail.
            foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
            Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
            Assertions.fail();
        } catch (ManagementException e) {
            // expected: priority change rejected
        }
        computeManager.virtualMachines().deleteById(foundVM.id());
    }

    /** Moving a running VM into a different proximity placement group must be rejected. */
    @Test
    public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();

        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());

        // A second availability set + PPG in a different region/resource group to move into.
        AvailabilitySet setCreated2 =
            computeManager
                .availabilitySets()
                .define(availabilitySetName2)
                .withRegion(regionProxPlacementGroup2)
                .withNewResourceGroup(rgName2)
                .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
                .create();

        Assertions.assertEquals(availabilitySetName2, setCreated2.name());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());

        // VM created inside the first PPG.
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(regionProxPlacementGroup)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();

        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());

        PowerState powerState = foundVM.powerState();
        Assertions.assertEquals(powerState, PowerState.RUNNING);

        VirtualMachineInstanceView instanceView = foundVM.instanceView();
        Assertions.assertNotNull(instanceView);
        // NOTE(review): assertNotNull on a boolean — likely meant assertTrue; confirm intent.
        Assertions.assertNotNull(instanceView.statuses().size() > 0);

        // The VM and the availability set must both be members of the first PPG.
        Assertions.assertNotNull(foundVM.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
        Assertions
            .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));

        try {
            // Moving the running VM to the second PPG must fail with a descriptive message.
            VirtualMachine updatedVm =
                foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply();
        } catch (ManagementException clEx) {
            Assertions
                .assertTrue(
                    clEx
                        .getMessage()
                        .contains(
                            "Updating proximity placement group of VM javavm is not allowed while the VM is running."
                                + " Please stop/deallocate the VM and retry the operation."));
        }

        computeManager.virtualMachines().deleteById(foundVM.id());
        computeManager.availabilitySets().deleteById(setCreated.id());
    }

    /** VM and availability set share one PPG; removing the VM's PPG via update is a no-op server-side. */
    @Test
    public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
        AvailabilitySet setCreated =
            computeManager
                .availabilitySets()
                .define(availabilitySetName)
                .withRegion(regionProxPlacementGroup)
                .withNewResourceGroup(rgName)
                .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
                .create();

        Assertions.assertEquals(availabilitySetName, setCreated.name());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());

        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(regionProxPlacementGroup)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();

        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if
            (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
        Assertions.assertEquals("Windows_Server", foundVM.licenseType());

        PowerState powerState = foundVM.powerState();
        Assertions.assertEquals(powerState, PowerState.RUNNING);

        VirtualMachineInstanceView instanceView = foundVM.instanceView();
        Assertions.assertNotNull(instanceView);
        // NOTE(review): assertNotNull on a boolean — likely meant assertTrue; confirm intent.
        Assertions.assertNotNull(instanceView.statuses().size() > 0);

        Assertions.assertNotNull(foundVM.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
        Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
        Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
        Assertions
            .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));

        // Even after withoutProximityPlacementGroup() the PPG association is still reported.
        VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply();

        Assertions.assertNotNull(updatedVm.proximityPlacementGroup());
        Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType());
        Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds());
        Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty());
        Assertions
            .assertTrue(
                setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));
computeManager.virtualMachines().deleteById(foundVM.id());
        computeManager.availabilitySets().deleteById(setCreated.id());
    }

    /** Batch-creates several VMs plus related networks/IPs and verifies every piece by creatable key. */
    @Test
    public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;

        CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
        List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;

        // Single batch call creates all VMs and their dependencies in parallel.
        CreatedResources<VirtualMachine> createdVirtualMachines =
            computeManager.virtualMachines().create(virtualMachineCreatables);
        Assertions.assertTrue(createdVirtualMachines.size() == count);

        Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
            Assertions.assertNotNull(virtualMachine.id());
        }

        // Related resources are retrievable through their creatable keys.
        Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        for (String networkCreatableKey : networkCreatableKeys) {
            Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
            Assertions.assertNotNull(createdNetwork);
            Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
        }

        Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }
        for (String publicIpCreatableKey : publicIpCreatableKeys) {
            PublicIpAddress createdPublicIpAddress =
                (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
            Assertions.assertNotNull(createdPublicIpAddress);
            Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
        }
    }

    /** Same parallel creation, but consumed as a reactive stream of created resources. */
    @Test
    public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
        String vmNamePrefix = "vmz";
        String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
        String networkNamePrefix = generateRandomResourceName("vnet-", 15);
        int count = 5;

        final Set<String> virtualMachineNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
        }
        final Set<String> networkNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            networkNames.add(String.format("%s-%d", networkNamePrefix, i));
        }
        final Set<String> publicIPAddressNames = new HashSet<>();
        for (int i = 0; i < count; i++) {
            publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
        }

        final CreatablesInfo creatablesInfo =
            prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
        final AtomicInteger resourceCount = new AtomicInteger(0);
        List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
        // Each emitted resource (VM, network or public IP) is checked by type as it arrives.
        computeManager
            .virtualMachines()
            .createAsync(virtualMachineCreatables)
            .map(
                createdResource -> {
                    if (createdResource instanceof Resource) {
                        Resource resource = (Resource) createdResource;
                        System.out.println("Created: " + resource.id());
                        if (resource instanceof VirtualMachine) {
                            VirtualMachine virtualMachine = (VirtualMachine) resource;
                            Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
                            Assertions.assertNotNull(virtualMachine.id());
                        } else if (resource instanceof Network) {
                            Network network = (Network) resource;
                            Assertions.assertTrue(networkNames.contains(network.name()));
                            Assertions.assertNotNull(network.id());
                        } else if (resource instanceof PublicIpAddress) {
                            PublicIpAddress
                                publicIPAddress = (PublicIpAddress) resource;
                            Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
                            Assertions.assertNotNull(publicIPAddress.id());
                        }
                    }
                    resourceCount.incrementAndGet();
                    return createdResource;
                })
            .blockLast();

        // Everything should now exist in the resource group.
        networkNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
        });
        publicIPAddressNames.forEach(name -> {
            Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
        });
        Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(count, resourceCount.get());
    }

    /** Verifies unmanaged data disks can be stored in, and reattached from, a chosen storage account. */
    @Test
    public void canSetStorageAccountForUnmanagedDisk() {
        final String storageName = generateRandomResourceName("st", 14);
        // Premium storage account that will hold the data-disk VHDs.
        StorageAccount storageAccount =
            storageManager
                .storageAccounts()
                .define(storageName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withSku(StorageAccountSkuType.PREMIUM_LRS)
                .create();

        // VM with two unmanaged data disks stored explicitly at the account above.
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .defineUnmanagedDataDisk("disk1")
                    .withNewVhd(100)
                    .withLun(2)
                    .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                    .attach()
                .defineUnmanagedDataDisk("disk2")
                    .withNewVhd(100)
                    .withLun(3)
                    .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
                    .attach()
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .create();

        // Disks are keyed by LUN (2 and 3 above).
        Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
        VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
        Assertions.assertNotNull(firstUnmanagedDataDisk);
        VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
        Assertions.assertNotNull(secondUnmanagedDataDisk);
        String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
        String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
        Assertions.assertNotNull(createdVhdUri1);
        Assertions.assertNotNull(createdVhdUri2);

        computeManager.virtualMachines().deleteById(virtualMachine.id());

        // Recreate a VM reattaching the first VHD left behind in the storage account.
        virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
                .create();

        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
        Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(1, unmanagedDataDisks.size());
        firstUnmanagedDataDisk = null;
        for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
            firstUnmanagedDataDisk = unmanagedDisk;
            break;
        }
        Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
        Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));

        // Attach the second leftover VHD via update.
        virtualMachine
            .update()
            .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
            .apply();
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
    }

    /** Adds/replaces tags through the fluent update and checks the inner model reflects them. */
    @Test
    public void canUpdateTagsOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();

        // Single-tag add.
        virtualMachine.update().withTag("test", "testValue").apply();
        Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));
        // Whole-map replace.
        Map<String, String> testTags = new HashMap<String, String>();
        testTags.put("testTag", "testValue");
        virtualMachine.update().withTags(testTags).apply();
        Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
    }

    /** Runs a shell script on a Linux VM via run-command and expects non-empty output. */
    @Test
    public void canRunScriptOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .create();

        List<String> installGit = new ArrayList<>();
        installGit.add("sudo apt-get update");
        installGit.add("sudo apt-get install -y git");

        RunCommandResult runResult =
            virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>());
        Assertions.assertNotNull(runResult);
        Assertions.assertNotNull(runResult.value());
        Assertions.assertTrue(runResult.value().size() > 0);
    }

    // Polls real wall-clock time waiting for eviction, hence skipped in playback mode.
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canPerformSimulateEvictionOnSpotVirtualMachine() {
        VirtualMachine virtualMachine =
            computeManager.virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();

        // Before eviction: OS disk is present and attached.
        Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
        Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertNotNull(disk);
        Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());

        // Trigger a simulated spot eviction, then poll (up to 30 min) for deallocation.
        virtualMachine.simulateEviction();
        boolean deallocated = false;
        int pollIntervalInMinutes = 5;
        for (int i = 0; i < 30; i += pollIntervalInMinutes) {
            ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));

            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
            if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
                deallocated = true;
                break;
            }
        }
        Assertions.assertTrue(deallocated);

        // After eviction: OS disk details are gone from the VM and the disk is RESERVED.
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertEquals(0, virtualMachine.osDiskSize());
        disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
    }

    /** Force-deletes a VM and verifies the NIC survives the forced deletion. */
    @Test
    public void canForceDeleteVirtualMachine() {
        // Create a VM in the force-delete-enabled canary region.
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .create();
        VirtualMachine
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());

        String nicId = virtualMachine.primaryNetworkInterfaceId();

        // Force delete (second argument true).
        computeManager.virtualMachines().deleteById(virtualMachine.id(), true);

        try {
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException ex) {
            virtualMachine = null;
            Assertions.assertEquals(404, ex.getResponse().getStatusCode());
        }
        Assertions.assertNull(virtualMachine);

        // The NIC is not removed by a force delete of the VM.
        NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
        Assertions.assertNotNull(nic);
    }

    /** Verifies DeleteOptions (DELETE vs default DETACH) on OS disk, data disks, NICs and public IP. */
    @Test
    public void canCreateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        final String publicIpDnsLabel = generateRandomResourceName("pip", 20);

        Network network =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/24")
                .withSubnet("default", "10.0.0.0/24")
                .create();

        // vm1: everything marked DELETE so dependent resources go away with the VM.
        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();

        Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
        Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // Only the network and the (detached) public IP remain.
        Assertions.assertEquals(2,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
        PublicIpAddress publicIpAddress =
            computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

        // vm2: secondary NIC also marked DELETE.
        String secondaryNicName = generateRandomResourceName("nic", 10);
        Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
            this
                .networkManager
                .networkInterfaces()
                .define(secondaryNicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic();

        VirtualMachine vm2 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withOSDiskDeleteOptions(DeleteOptions.DELETE)
                .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
                .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();

        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // Only the network remains.
        Assertions.assertEquals(1,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

        // vm3: no DeleteOptions specified — everything defaults to DETACH and survives deletion.
        secondaryNetworkInterfaceCreatable =
            this
                .networkManager
                .networkInterfaces()
                .define(secondaryNicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic();

        VirtualMachine vm3 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("testuser")
                .withSsh(sshPublicKey())
                .withNewDataDisk(10)
                .withNewDataDisk(computeManager.disks()
                    .define("datadisk2")
                    .withRegion(region)
                    .withExistingResourceGroup(rgName)
                    .withData()
                    .withSizeInGB(10))
                .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
                .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
                .create();

        Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

        computeManager.virtualMachines().deleteById(vm3.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // Disks and NICs were detached, not deleted.
        Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(2,
            computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
    }

    @Test
    public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;

        Network network =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/24")
                .withSubnet("default", "10.0.0.0/24")
                .create();

        VirtualMachine vm1 =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("default")
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
Why use the low-level `serviceClient()` method for updating the VirtualMachine? I believe this can simply be: ```java vm.update().withUserData(userDataForUpdate).apply(); ``` right? Otherwise, the fluent `update()` method is not covered by this test.
/**
 * Verifies that user data can be supplied at VM creation and then changed afterwards.
 *
 * <p>Fix: the update is now performed through the fluent {@code vm.update()...apply()} flow
 * instead of the low-level {@code serviceClient()} call, so the fluent update path is actually
 * exercised by this test. The low-level client is still used for the GETs because only
 * {@code getByResourceGroupWithResponse} with {@code InstanceViewTypes.USER_DATA} returns the
 * userData field in the response.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Base64-encoded user-data payloads (the service requires base64).
    String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    VirtualMachine vm = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();

    // userData is only returned when explicitly requested via the USER_DATA instance-view expand.
    Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForCreate, response.getValue().userData());

    // Use the fluent update flow so that VirtualMachine.update() is covered by this test.
    vm.update().withUserData(userDataForUpdate).apply();

    response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForUpdate, response.getValue().userData());
}
computeManager.serviceClient().getVirtualMachines()
/**
 * Creates a VM with an initial user-data payload, then changes the payload through the fluent
 * {@code update()} flow and verifies both values round-trip through the service.
 *
 * <p>The low-level service client is used for reads only, because userData is returned solely
 * when the GET is expanded with {@code InstanceViewTypes.USER_DATA}.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Base64-encoded payloads: one applied at create time, one applied via update.
    String initialUserData = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String updatedUserData = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    VirtualMachine virtualMachine = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(initialUserData)
        .create();

    // Read back with the USER_DATA expand and confirm the create-time payload stuck.
    Response<VirtualMachineInner> getResponse = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(initialUserData, getResponse.getValue().userData());

    // Swap the payload via the fluent update path.
    virtualMachine.update().withUserData(updatedUserData).apply();

    // Read back again and confirm the updated payload is now reported.
    getResponse = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(updatedUserData, getResponse.getValue().userData());
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
        Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

        // Deleting vm1 removes OS disk, data disks and primary NIC (all DELETE); only
        // 2 resources (asserted below — presumably the network and the public IP) remain.
        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

        PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

        String secondaryNicName = generateRandomResourceName("nic", 10);
        Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();

        // vm2: OS disk and both NICs (primary + secondary) are DELETE; after deleting
        // vm2 only 1 resource (the network) remains.
        VirtualMachine vm2 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

        // vm3: no delete options specified — everything defaults to DETACH and survives
        // VM deletion (3 disks and 2 NICs asserted below).
        secondaryNetworkInterfaceCreatable = this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();

        VirtualMachine vm3 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

        computeManager.virtualMachines().deleteById(vm3.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
    }

    // DeleteOptions on update(): disks added via update() do NOT inherit the default
    // delete option chosen at create time (verified by the surviving-disk counts).
    @Test
    public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;
        Network network = this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();

        // vm1: created with one data disk and default DELETE for data disks.
        VirtualMachine vm1 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        // Add a second data disk via update() (no delete option given).
        vm1.update()
            .withNewDataDisk(10)
            .apply();

        // The create-time disk (DELETE) is removed with the VM; the update-added disk
        // survives — exactly 1 disk left in the group.
        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());

        Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.disks().deleteById(disk.id());

        // vm2: created without data-disk delete defaults (so DETACH).
        VirtualMachine vm2 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        vm2.update()
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .apply();

        // Both data disks survive vm2's deletion — 2 disks left in the group.
        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
    }

    // Hibernation lifecycle: create with hibernation enabled, hibernate-deallocate,
    // resume, then disable hibernation via update().
    @Test
    public void canHibernateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
            .enableHibernation()
            .create();
        Assertions.assertTrue(vm.isHibernationEnabled());

        // deallocate(true) requests hibernate-deallocate; the instance view should then
        // contain a "HibernationState/Hibernated" status.
        vm.deallocate(true);
        InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
            .filter(status -> "HibernationState/Hibernated".equals(status.code()))
            .findFirst().orElse(null);
        Assertions.assertNotNull(hibernationStatus);

        vm.start();
        vm.deallocate();

        vm.update()
            .disableHibernation()
            .apply();
        Assertions.assertFalse(vm.isHibernationEnabled());
    }

    // Power-operation round trip: redeploy, powerOff, start, restart, deallocate —
    // asserting the expected PowerState after each refreshInstanceView().
    @Test
    public void canOperateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.redeploy();

        vm.powerOff(true);
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.STOPPED, vm.powerState());

        vm.start();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.restart();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.deallocate();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
    }

    // Ephemeral OS disk placed on the cache disk; data disks remain regular managed disks.
    @Test
    public void canCreateVirtualMachineWithEphemeralOSDisk() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
            .withEphemeralOSDisk()
            .withPlacement(DiffDiskPlacement.CACHE_DISK)
            .withNewDataDisk(1, 1, CachingTypes.READ_WRITE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .create();

        // The service reports READ_ONLY caching and DELETE option for the ephemeral OS disk.
        Assertions.assertNull(vm.osDiskDiskEncryptionSetId());
        Assertions.assertTrue(vm.osDiskSize() > 0);
        Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE);
        Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY);
        Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks()));
        Assertions.assertTrue(vm.isOSDiskEphemeral());
        Assertions.assertNotNull(vm.osDiskId());
        String osDiskId = vm.osDiskId();

        // Data disks can still be added/removed normally.
        vm.update()
            .withoutDataDisk(1)
            .withNewDataDisk(1, 2, CachingTypes.NONE)
            .withNewDataDisk(1)
            .apply();
        Assertions.assertEquals(vm.dataDisks().size(), 2);

        // Power cycling keeps the same OS disk id; deallocate is expected to fail
        // for an ephemeral OS disk VM.
        vm.powerOff();
        vm.start();
        vm.refresh();
        Assertions.assertEquals(osDiskId, vm.osDiskId());
        Assertions.assertThrows(Exception.class, vm::deallocate);
    }

    // Attaching standalone VMs to scale sets: works for flexible orchestration mode,
    // fails for unmanaged-disk VMs and for uniform orchestration mode.
    @Test
    public void canCreateVirtualMachineWithExistingScaleSet() throws Exception {
        final String vmssName = generateRandomResourceName("vmss", 10);

        Network network = this
            .networkManager
            .networks()
            .define("vmssvnet")
            .withRegion(region.name())
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/28")
            .withSubnet("subnet1", "10.0.0.0/28")
            .create();

        ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName);
        LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

        // Flexible-orchestration scale set with an initial capacity of 1.
        VirtualMachineScaleSet flexibleVMSS = this.computeManager
            .virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2)
            .withExistingPrimaryNetworkSubnet(network, "subnet1")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withCapacity(1)
            .withUpgradeMode(UpgradeMode.AUTOMATIC)
            .create();

        String regularVMName = generateRandomResourceName("vm", 10);
        final String pipDnsLabel = generateRandomResourceName("pip", 10);
        VirtualMachine regularVM = this.computeManager
            .virtualMachines()
            .define(regularVMName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(pipDnsLabel)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("jvuser2")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(flexibleVMSS)
            .create();

        // Attaching the VM raises capacity to 2; deleting it drops capacity back to 1.
        flexibleVMSS.refresh();
        Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId());
        Assertions.assertEquals(2, flexibleVMSS.capacity());

        regularVM.deallocate();
        Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED);

        this.computeManager
            .virtualMachines().deleteById(regularVM.id());
        flexibleVMSS.refresh();
        Assertions.assertEquals(flexibleVMSS.capacity(), 1);

        // An unmanaged-disk VM cannot join the scale set — the service rejects it.
        final String storageAccountName = generateRandomResourceName("stg", 17);
        Assertions.assertThrows(
            ApiErrorException.class,
            () -> computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.1.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS")
                .withRootUsername("jvuser3")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .withNewStorageAccount(storageAccountName)
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .withExistingVirtualMachineScaleSet(flexibleVMSS)
                .create()
        );

        final String vmssName2 = generateRandomResourceName("vmss", 10);
        Network network2 = this
            .networkManager
            .networks()
            .define("vmssvnet2")
            .withRegion(region.name())
            .withExistingResourceGroup(rgName)
            .withAddressSpace("192.168.0.0/28")
            .withSubnet("subnet2", "192.168.0.0/28")
            .create();
        LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

        // Uniform-orchestration scale set: standalone VMs may NOT be attached to it.
        VirtualMachineScaleSet uniformVMSS = this.computeManager
            .virtualMachineScaleSets()
            .define(vmssName2)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
            .withExistingPrimaryNetworkSubnet(network2, "subnet2")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser4")
            .withSsh(sshPublicKey())
            .withCapacity(1)
            .create();

        Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null));

        String regularVMName2 = generateRandomResourceName("vm", 10);
        Assertions.assertThrows(
            ApiErrorException.class,
            () -> this.computeManager
                .virtualMachines()
                .define(regularVMName2)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.1.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("jvuser5")
                .withSsh(sshPublicKey())
                .withExistingVirtualMachineScaleSet(uniformVMSS)
                .create()
        );
    }

    // Swap the OS disk of a running VM with a detached, customer-key-encrypted disk,
    // then swap back. Not recorded — relies on a live key vault.
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canSwapOSDiskWithManagedDisk() {
        String storageAccountName = generateRandomResourceName("sa", 15);
        StorageAccount storageAccount = this.storageManager
            .storageAccounts()
            .define(storageAccountName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .create();

        String vm1Name = generateRandomResourceName("vm", 15);
        VirtualMachine vm1 = this.computeManager
            .virtualMachines()
            .define(vm1Name)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10, 1, CachingTypes.READ_WRITE)
            .withOSDiskDeleteOptions(DeleteOptions.DETACH)
            .withExistingStorageAccount(storageAccount)
            .create();

        // vm1's OS disk uses the default platform-managed-key encryption.
        Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId());
        Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type());

        // Build a disk encryption set backed by a key-vault RSA key for
        // customer-managed-key encryption.
        String vaultName = generateRandomResourceName("vault", 15);
        Vault vault = this.keyVaultManager
            .vaults()
            .define(vaultName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .defineAccessPolicy()
            .forServicePrincipal(clientIdFromFile())
            .allowKeyPermissions(KeyPermissions.CREATE)
            .attach()
            .create();

        String keyName = generateRandomResourceName("key", 15);
        Key key = vault.keys()
            .define(keyName)
            .withKeyTypeToCreate(KeyType.RSA)
            .withKeySize(4096)
            .create();

        String desName = generateRandomResourceName("des", 15);
        DiskEncryptionSet des = this.computeManager.diskEncryptionSets()
            .define(desName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY)
            .withExistingKeyVault(vault.id())
            .withExistingKey(key.id())
            .withSystemAssignedManagedServiceIdentity()
            .create();

        // Grant the DES's system-assigned identity access to the vault key.
        vault.update()
            .defineAccessPolicy()
            .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId())
            .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY)
            .attach()
            .withPurgeProtectionEnabled()
            .apply();

        // vm2 exists only to produce a customer-key-encrypted OS disk.
        String vm2Name = generateRandomResourceName("vm", 15);
        VirtualMachine vm2 = this.computeManager.virtualMachines()
            .define(vm2Name)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withOSDiskDiskEncryptionSet(des.id())
            .withOSDiskDeleteOptions(DeleteOptions.DETACH)
            .create();

        // Delete vm2 but keep its (DETACH) OS disk.
        String vm2OSDiskId = vm2.osDiskId();
        this.computeManager.virtualMachines().deleteById(vm2.id());
        Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId);
        Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type());

        // Swap vm1's OS disk to vm2's detached disk (VM deallocated first).
        vm1.deallocate();
        vm1.update()
            .withOSDisk(vm2OSDiskId)
            .apply();
        vm1.start();
        vm1.refresh();
        Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId);
        Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()));

        // Swap back to the original OS disk; the DES association is gone.
        vm1.deallocate();
        vm1.update()
            .withOSDisk(vm1OSDisk)
            .apply();
        vm1.start();
        vm1.refresh();
        Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id());
        Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet());
    }

    // Trusted launch VM: create with secure boot + vTPM, then disable both via update
    // and restart for the change to be observable.
    @Test
    public void canCRUDTrustedLaunchVM() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withTrustedLaunch()
            .withSecureBoot()
            .withVTpm()
            .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .create();

        Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
        Assertions.assertTrue(vm.isSecureBootEnabled());
        Assertions.assertTrue(vm.isVTpmEnabled());

        // Disable secure boot and vTPM, then restart (async chain, blocked here).
        vm.update()
            .withoutSecureBoot()
            .withoutVTpm()
            .applyAsync()
            .flatMap(VirtualMachine::restartAsync)
            .block();
        ResourceManagerUtils.sleep(Duration.ofMinutes(1));

        vm = computeManager.virtualMachines().getById(vm.id());
        Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
        Assertions.assertFalse(vm.isSecureBootEnabled());
        Assertions.assertFalse(vm.isVTpmEnabled());

        computeManager.virtualMachines().deleteById(vm.id());
    }

    // DeleteOptions can be changed after creation, per-resource and in bulk.
    @Test
    public void canUpdateDeleteOptions() {
        String networkName = generateRandomResourceName("network", 15);
        String nicName = generateRandomResourceName("nic", 15);
        String nicName2 = generateRandomResourceName("nic", 15);

        Network network = this
            .networkManager
            .networks()
            .define(networkName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.1.0/24")
            .withSubnet("subnet1", "10.0.1.0/28")
            .withSubnet("subnet2", "10.0.1.16/28")
            .create();

        // Everything (OS disk, data disk, both NICs) starts as DELETE.
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet1")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE))
            .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2)
            .withNewSecondaryNetworkInterface(this
                .networkManager
                .networkInterfaces()
                .define(nicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("subnet1")
                .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .create();

        Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions())));

        // Flip OS disk, primary NIC and data disk (LUN 1) to DETACH; the secondary NIC
        // keeps its DELETE option.
        vm.update()
            .withOsDiskDeleteOptions(DeleteOptions.DETACH)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH)
            .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1)
            .apply();

        Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.networkInterfaceIds().stream()
            .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions())));

        NetworkInterface secondaryNic2 = this
            .networkManager
            .networkInterfaces()
            .define(nicName2)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet2")
            .withPrimaryPrivateIPAddressDynamic()
            .create();

        vm.powerOff();
        vm.deallocate();

        vm.update()
            .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH))
            .withExistingSecondaryNetworkInterface(secondaryNic2)
            .apply();

        // Bulk-update: all data disks and all secondary NICs back to DELETE.
        vm.update()
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0]))
            .withNetworkInterfacesDeleteOptions(
                DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new))
            .apply();

        Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions())));
    }

    // listByVirtualMachineScaleSetId returns only VMs attached to the given scale set.
    @Test
    public void testListVmByVmssId() {
        String vmssName =
            generateRandomResourceName("vmss", 15);
        String vmName = generateRandomResourceName("vm", 15);
        String vmName2 = generateRandomResourceName("vm", 15);

        VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .create();

        Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());

        // vm joins the flexible scale set; vm2 stays standalone.
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(vmss)
            .create();
        Assertions.assertNotNull(vm.virtualMachineScaleSetId());

        VirtualMachine vm2 = computeManager.virtualMachines()
            .define(vmName2)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.16/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .create();
        Assertions.assertNull(vm2.virtualMachineScaleSetId());

        // By scale set id: only vm. By resource group: both VMs.
        Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
        Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id()));
        Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count());
    }

    @Test
    @DoNotRecord(skipInPlayback = true)
    @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. Backend pageSize may change, so we don't want to assert that.")
    public void testListByVmssIdNextLink() throws Exception {
        String vmssName = generateRandomResourceName("vmss", 15);
        String vnetName = generateRandomResourceName("vnet", 15);
        String vmName = generateRandomResourceName("vm", 15);
        int vmssCapacity = 70;

        // A standalone VM that must NOT appear in the by-scale-set listing.
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .create();

        Network network = networkManager.networks().define(vnetName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("subnet1", "10.0.0.0/24")
            .create();

        LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

        // 70 instances forces the list call to page (nextLink requests).
        VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
            .withExistingPrimaryNetworkSubnet(network, "subnet1")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withCapacity(vmssCapacity)
            .create();

        // Iterate page by page; every page (including nextLink pages) must be a 200.
        PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id());
        Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage();
        int pageCount = 0;
        for (PagedResponse<VirtualMachine> response : vmIterable) {
            pageCount++;
            Assertions.assertEquals(200, response.getStatusCode());
        }
        Assertions.assertEquals(vmssCapacity,
            vmPaged.stream().count());
        Assertions.assertEquals(2, pageCount);
    }

    // Builds a batch of VM creatables — each with its own network and public IP,
    // sharing one storage account and resource group — and records the creatable
    // keys so callers can look up the side-created resources after batch creation.
    private CreatablesInfo prepareCreatableVirtualMachines(
        Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
        Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region);
        Creatable<StorageAccount> storageAccountCreatable = storageManager
            .storageAccounts()
            .define(generateRandomResourceName("stg", 20))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);

        List<String> networkCreatableKeys = new ArrayList<>();
        List<String> publicIpCreatableKeys = new ArrayList<>();
        List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
        for (int i = 0; i < vmCount; i++) {
            Creatable<Network> networkCreatable = networkManager
                .networks()
                .define(String.format("%s-%d", networkNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withAddressSpace("10.0.0.0/28");
            networkCreatableKeys.add(networkCreatable.key());

            Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager
                .publicIpAddresses()
                .define(String.format("%s-%d", publicIpNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable);
            publicIpCreatableKeys.add(publicIPAddressCreatable.key());

            Creatable<VirtualMachine> virtualMachineCreatable = computeManager
                .virtualMachines()
                .define(String.format("%s-%d", vmNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withNewPrimaryNetwork(networkCreatable)
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("tirekicker")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withNewStorageAccount(storageAccountCreatable);
            virtualMachineCreatables.add(virtualMachineCreatable);
        }
        CreatablesInfo creatablesInfo = new CreatablesInfo();
        creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
        creatablesInfo.networkCreatableKeys = networkCreatableKeys;
        creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
        return creatablesInfo;
    }

    // Simple holder for the batch creatables and their lookup keys.
    class CreatablesInfo {
        private List<Creatable<VirtualMachine>> virtualMachineCreatables;
        List<String> networkCreatableKeys;
        List<String> publicIpCreatableKeys;
    }
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis();
// Poll the delete LRO to completion.
while (!pollStatus.isComplete()) {
    ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills));
    PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll();
    pollStatus = pollResponse.getStatus();
    delayInMills =
        pollResponse.getRetryAfter() == null
            ? defaultDelayInMillis
            : (int) pollResponse.getRetryAfter().toMillis();
}
// Confirm the VM is gone: a 404 with NotFound/ResourceNotFound counts as deleted.
boolean deleted = false;
try {
    computeManager.virtualMachines().getById(virtualMachine.id());
} catch (ManagementException e) {
    if (e.getResponse().getStatusCode() == 404
        && ("NotFound".equals(e.getValue().getCode())
            || "ResourceNotFound".equals(e.getValue().getCode()))) {
        deleted = true;
    }
}
Assertions.assertTrue(deleted);
}

/**
 * Creates a low-priority VM with a max price, then exercises price/priority updates:
 * price changes require deallocation; switching back to REGULAR priority is rejected.
 */
@Test
public void canCreateUpdatePriorityAndPrice() throws Exception {
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
        .withMaxPrice(1000.0)
        .withLicenseType("Windows_Server")
        .create();
    // Locate the VM via listing, then fetch it directly.
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(region, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice());
    Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy());
    // Changing the max price on a running VM is expected to fail.
    try {
        foundVM.update().withMaxPrice(1500.0).apply();
        Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice());
        Assertions.fail();
    } catch (ManagementException e) {
        // expected: price update rejected while the VM is running
    }
    // Price can be updated after deallocating.
    foundVM.deallocate();
    foundVM.update().withMaxPrice(2000.0).apply();
    foundVM.start();
    Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice());
    // Priority can be flipped between SPOT and LOW...
    foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply();
    Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority());
    foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply();
    Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority());
    // ...but switching to REGULAR is expected to fail.
    try {
        foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply();
        Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority());
        Assertions.fail();
    } catch (ManagementException e) {
        // expected: REGULAR priority rejected for this VM
    }
    computeManager.virtualMachines().deleteById(foundVM.id());
}

/**
 * Creates an availability set (and a second one in another region/group) with proximity
 * placement groups, creates a VM in the first group, and verifies that moving the running VM
 * to the second proximity placement group is rejected.
 */
@Test
public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception {
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    // Second availability set + proximity placement group in a different region and group.
    // NOTE(review): rgName2 is not deleted by cleanUpResources() -- possible leak.
    AvailabilitySet setCreated2 =
        computeManager
            .availabilitySets()
            .define(availabilitySetName2)
            .withRegion(regionProxPlacementGroup2)
            .withNewResourceGroup(rgName2)
            .withNewProximityPlacementGroup(proxGroupName2, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName2, setCreated2.name());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location());
    // VM placed in the first proximity placement group.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
Assertions.assertNotNull(foundVM);
Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
Assertions.assertNotNull(foundVM);
Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
Assertions.assertEquals("Windows_Server", foundVM.licenseType());
PowerState powerState = foundVM.powerState();
Assertions.assertEquals(powerState, PowerState.RUNNING);
VirtualMachineInstanceView instanceView = foundVM.instanceView();
Assertions.assertNotNull(instanceView);
Assertions.assertNotNull(instanceView.statuses().size() > 0);
// The VM and the availability set should both appear in the proximity placement group.
Assertions.assertNotNull(foundVM.proximityPlacementGroup());
Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
Assertions
    .assertTrue(
        setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
Assertions
    .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
// Moving the running VM to the second proximity placement group must be rejected.
try {
    VirtualMachine updatedVm =
        foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply();
} catch (ManagementException clEx) {
    // expected: service refuses the move while the VM is running
    Assertions
        .assertTrue(
            clEx
                .getMessage()
                .contains(
                    "Updating proximity placement group of VM javavm is not allowed while the VM is running."
                        + " Please stop/deallocate the VM and retry the operation."));
}
computeManager.virtualMachines().deleteById(foundVM.id());
computeManager.availabilitySets().deleteById(setCreated.id());
}

/**
 * Creates an availability set and a VM sharing one proximity placement group and verifies both
 * appear in the group; also checks withoutProximityPlacementGroup() on a running VM leaves the
 * association visible.
 */
@Test
public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception {
    AvailabilitySet setCreated =
        computeManager
            .availabilitySets()
            .define(availabilitySetName)
            .withRegion(regionProxPlacementGroup)
            .withNewResourceGroup(rgName)
            .withNewProximityPlacementGroup(proxGroupName, proxGroupType)
            .create();
    Assertions.assertEquals(availabilitySetName, setCreated.name());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location());
    // VM created inside the same proximity placement group.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(regionProxPlacementGroup)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id())
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withUnmanagedDisks()
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withOSDiskCaching(CachingTypes.READ_WRITE)
        .withOSDiskName("javatest")
        .withLicenseType("Windows_Server")
        .create();
    VirtualMachine foundVM = null;
    PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
    for (VirtualMachine vm1 : vms) {
        if (vm1.name().equals(vmName)) {
            foundVM = vm1;
            break;
        }
    }
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(foundVM);
    Assertions.assertEquals(regionProxPlacementGroup, foundVM.region());
    Assertions.assertEquals("Windows_Server", foundVM.licenseType());
    PowerState powerState = foundVM.powerState();
    Assertions.assertEquals(powerState, PowerState.RUNNING);
    VirtualMachineInstanceView instanceView = foundVM.instanceView();
    Assertions.assertNotNull(instanceView);
    Assertions.assertNotNull(instanceView.statuses().size() > 0);
    // Both the availability set and the VM are listed on the proximity placement group.
    Assertions.assertNotNull(foundVM.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0)));
    Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds());
    Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty());
    Assertions
        .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0)));
    // After withoutProximityPlacementGroup() the returned model still reports the association.
    VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply();
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup());
    Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType());
    Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds());
    Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty());
    Assertions
        .assertTrue(
            setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0)));
computeManager.virtualMachines().deleteById(foundVM.id());
computeManager.availabilitySets().deleteById(setCreated.id());
}

/**
 * Creates a batch of VMs (with their networks and public IPs) in one blocking create() call and
 * verifies every VM and related resource was produced.
 */
@Test
public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception {
    String vmNamePrefix = "vmz";
    String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
    String networkNamePrefix = generateRandomResourceName("vnet-", 15);
    int count = 5;
    CreatablesInfo creatablesInfo =
        prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
    List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
    List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys;
    List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys;
    CreatedResources<VirtualMachine> createdVirtualMachines =
        computeManager.virtualMachines().create(virtualMachineCreatables);
    Assertions.assertTrue(createdVirtualMachines.size() == count);
    // Every expected VM name should be present among the created VMs.
    Set<String> virtualMachineNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
    }
    for (VirtualMachine virtualMachine : createdVirtualMachines.values()) {
        Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
        Assertions.assertNotNull(virtualMachine.id());
    }
    // Related networks are retrievable through their creatable keys.
    Set<String> networkNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        networkNames.add(String.format("%s-%d", networkNamePrefix, i));
    }
    for (String networkCreatableKey : networkCreatableKeys) {
        Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey);
        Assertions.assertNotNull(createdNetwork);
        Assertions.assertTrue(networkNames.contains(createdNetwork.name()));
    }
    // Related public IPs likewise.
    Set<String> publicIPAddressNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
    }
    for (String publicIpCreatableKey : publicIpCreatableKeys) {
        PublicIpAddress createdPublicIpAddress =
            (PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey);
        Assertions.assertNotNull(createdPublicIpAddress);
        Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name()));
    }
}

/**
 * Creates the same batch reactively via createAsync() and verifies each emitted resource
 * (VM, network, public IP) as it streams by, plus final resource counts.
 */
@Test
public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception {
    String vmNamePrefix = "vmz";
    String publicIpNamePrefix = generateRandomResourceName("pip-", 15);
    String networkNamePrefix = generateRandomResourceName("vnet-", 15);
    int count = 5;
    final Set<String> virtualMachineNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i));
    }
    final Set<String> networkNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        networkNames.add(String.format("%s-%d", networkNamePrefix, i));
    }
    final Set<String> publicIPAddressNames = new HashSet<>();
    for (int i = 0; i < count; i++) {
        publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i));
    }
    final CreatablesInfo creatablesInfo =
        prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count);
    final AtomicInteger resourceCount = new AtomicInteger(0);
    List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables;
    computeManager
        .virtualMachines()
        .createAsync(virtualMachineCreatables)
        .map(
            createdResource -> {
                if (createdResource instanceof Resource) {
                    Resource resource = (Resource) createdResource;
                    System.out.println("Created: " + resource.id());
                    // Validate each emitted resource against the expected name sets.
                    if (resource instanceof VirtualMachine) {
                        VirtualMachine virtualMachine = (VirtualMachine) resource;
                        Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name()));
                        Assertions.assertNotNull(virtualMachine.id());
                    } else if (resource instanceof Network) {
                        Network network = (Network) resource;
                        Assertions.assertTrue(networkNames.contains(network.name()));
                        Assertions.assertNotNull(network.id());
                    } else if (resource instanceof PublicIpAddress) {
                        PublicIpAddress publicIPAddress = (PublicIpAddress) resource;
                        Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name()));
                        Assertions.assertNotNull(publicIPAddress.id());
                    }
                }
                resourceCount.incrementAndGet();
                return createdResource;
            })
        .blockLast();
    // All networks and public IPs must now exist in the resource group.
    networkNames.forEach(name -> {
        Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name));
    });
    publicIPAddressNames.forEach(name -> {
        Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name));
    });
    Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(count, resourceCount.get());
}

/**
 * Creates a VM with two new unmanaged data disks stored in an explicit premium storage account,
 * then recreates/updates a VM attaching those existing VHDs.
 */
@Test
public void canSetStorageAccountForUnmanagedDisk() {
    final String storageName = generateRandomResourceName("st", 14);
    // Premium storage account that will hold the data-disk VHDs.
    StorageAccount storageAccount =
        storageManager
            .storageAccounts()
            .define(storageName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withSku(StorageAccountSkuType.PREMIUM_LRS)
            .create();
    // VM with two new unmanaged data disks at LUNs 2 and 3, stored in the account above.
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks()
            .defineUnmanagedDataDisk("disk1")
            .withNewVhd(100)
            .withLun(2)
            .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
            .attach()
            .defineUnmanagedDataDisk("disk2")
            .withNewVhd(100)
            .withLun(3)
            .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
            .attach()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .create();
    Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(2, unmanagedDataDisks.size());
// Data disks are keyed by LUN (2 and 3 as defined above).
VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2);
Assertions.assertNotNull(firstUnmanagedDataDisk);
VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3);
Assertions.assertNotNull(secondUnmanagedDataDisk);
String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri();
String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri();
Assertions.assertNotNull(createdVhdUri1);
Assertions.assertNotNull(createdVhdUri2);
computeManager.virtualMachines().deleteById(virtualMachine.id());
// Recreate a VM attaching the first VHD left behind by the deleted VM.
virtualMachine =
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withUnmanagedDisks()
        .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd")
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4"))
        .create();
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(1, unmanagedDataDisks.size());
firstUnmanagedDataDisk = null;
for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) {
    firstUnmanagedDataDisk = unmanagedDisk;
    break;
}
Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri());
Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1));
// Attach the second existing VHD via update.
virtualMachine
    .update()
    .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd")
    .apply();
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
unmanagedDataDisks = virtualMachine.unmanagedDataDisks();
Assertions.assertNotNull(unmanagedDataDisks);
Assertions.assertEquals(2, unmanagedDataDisks.size());
}

/** Verifies tag updates via withTag and withTags on an existing VM. */
@Test
public void canUpdateTagsOnVM() {
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();
    // Add a single tag.
    virtualMachine.update().withTag("test", "testValue").apply();
    Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));
    // Replace the tag map wholesale.
    Map<String, String> testTags = new HashMap<String, String>();
    testTags.put("testTag", "testValue");
    virtualMachine.update().withTags(testTags).apply();
    Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
}

/** Runs a shell script (git install) on a Linux VM via the run-command API. */
@Test
public void canRunScriptOnVM() {
    VirtualMachine virtualMachine =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .create();
    List<String> installGit = new ArrayList<>();
    installGit.add("sudo apt-get update");
    installGit.add("sudo apt-get install -y git");
    RunCommandResult runResult =
        virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>());
    Assertions.assertNotNull(runResult);
    Assertions.assertNotNull(runResult.value());
    Assertions.assertTrue(runResult.value().size() > 0);
}

/**
 * Simulates eviction of a spot VM and polls until it is deallocated; afterwards the OS disk
 * should be detached (RESERVED) and the VM report no OS disk size/storage type.
 */
@Test
@DoNotRecord(skipInPlayback = true)
public void canPerformSimulateEvictionOnSpotVirtualMachine() {
    VirtualMachine virtualMachine =
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();
    Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
    Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
    Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
    Assertions.assertNotNull(disk);
    Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());
    // Trigger eviction and poll (up to ~30 minutes, 5-minute intervals) for deallocation.
    virtualMachine.simulateEviction();
    boolean deallocated = false;
    int pollIntervalInMinutes = 5;
    for (int i = 0; i < 30; i += pollIntervalInMinutes) {
        ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
            deallocated = true;
            break;
        }
    }
    Assertions.assertTrue(deallocated);
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    Assertions.assertNotNull(virtualMachine);
    // After eviction the OS disk is no longer attached to the VM model.
    Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
    Assertions.assertEquals(0, virtualMachine.osDiskSize());
    disk = computeManager.disks().getById(virtualMachine.osDiskId());
    Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
}

/**
 * Force-deletes a VM and verifies the VM is gone (404) while its NIC survives.
 */
@Test
public void canForceDeleteVirtualMachine() {
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .create();
    VirtualMachine
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
Assertions.assertNotNull(virtualMachine);
Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
String nicId = virtualMachine.primaryNetworkInterfaceId();
// Force delete (second argument true).
computeManager.virtualMachines().deleteById(virtualMachine.id(), true);
// The VM must be gone: getById should 404.
try {
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
} catch (ManagementException ex) {
    virtualMachine = null;
    Assertions.assertEquals(404, ex.getResponse().getStatusCode());
}
Assertions.assertNull(virtualMachine);
// The NIC is not removed by a force delete of the VM.
NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
Assertions.assertNotNull(nic);
}

/**
 * Exercises DeleteOptions on create: DELETE options cause disks/NIC/public IP resources to be
 * removed with the VM, while the default (DETACH) leaves them behind.
 */
@Test
public void canCreateVirtualMachineWithDeleteOption() throws Exception {
    Region region = Region.US_WEST3;
    final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
    Network network =
        this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();
    // vm1: OS disk, data disks and primary NIC all marked DeleteOptions.DELETE.
    VirtualMachine vm1 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
    Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());
    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // Only the network and the public IP should remain (disks and NIC deleted with the VM).
    Assertions.assertEquals(2,
        computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
    PublicIpAddress publicIpAddress =
        computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());
    String secondaryNicName = generateRandomResourceName("nic", 10);
    Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable =
        this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();
    // vm2: OS disk, primary NIC and the secondary NIC all DELETE -- only the network remains.
    VirtualMachine vm2 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(1,
        computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
    secondaryNetworkInterfaceCreatable =
        this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();
    // vm3: no delete options set -- everything defaults to DETACH and survives the VM delete.
    VirtualMachine vm3 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());
    computeManager.virtualMachines().deleteById(vm3.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // Disks (OS + 2 data) and both NICs remain after deleting vm3.
    Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(2,
        computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
}

/**
 * Verifies delete-option behavior on VM update.
 * NOTE(review): this method continues past the end of the excerpt.
 */
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    Region region = Region.US_WEST3;
    Network network =
        this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();
    VirtualMachine vm1 =
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
Calling `vm.update().withUserData(userDataForUpdate).apply()` does not propagate the user data to the `VirtualMachine` service in real time, so use the service client's `update` method to update the `VirtualMachine` instead.
/**
 * Verifies that VM user data can be set at creation time and changed afterwards.
 *
 * <p>The update is deliberately performed through the inner service client
 * ({@code serviceClient().getVirtualMachines().update(...)}) rather than the fluent
 * {@code vm.update().withUserData(...).apply()}, because the fluent path does not
 * propagate user data to the service in real time. User data is only returned by GET
 * when {@link InstanceViewTypes#USER_DATA} is requested, hence the explicit
 * {@code getByResourceGroupWithResponse} calls.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Base64-encoded payloads; the service stores and returns user data verbatim.
    String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    // Create the VM with the initial user data. The fluent VirtualMachine result is not
    // needed (it was previously captured in an unused local): every subsequent read goes
    // through the service client so the USER_DATA instance view can be requested.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();

    // GET with the USER_DATA instance view and confirm the created value round-trips.
    Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForCreate, response.getValue().userData());

    // Update the user data via the service client (real-time propagation).
    computeManager.serviceClient().getVirtualMachines()
        .update(rgName, vmName, new VirtualMachineUpdateInner().withUserData(userDataForUpdate));

    // Re-fetch and confirm the updated value is visible.
    response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForUpdate, response.getValue().userData());
}
computeManager.serviceClient().getVirtualMachines()
/**
 * Verifies that VM user data can be set at creation time and changed afterwards.
 *
 * <p>Fix: the previous implementation updated via the fluent
 * {@code vm.update().withUserData(userDataForUpdate).apply()}, which does not propagate
 * user data to the service in real time, so the subsequent GET asserted against stale
 * data. The update now goes through the inner service client
 * ({@code serviceClient().getVirtualMachines().update(...)}), which takes effect
 * immediately. User data is only returned by GET when
 * {@link InstanceViewTypes#USER_DATA} is requested, hence the explicit
 * {@code getByResourceGroupWithResponse} calls.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Base64-encoded payloads; the service stores and returns user data verbatim.
    String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    // Create the VM with the initial user data. The fluent result is no longer captured:
    // every subsequent read goes through the service client so the USER_DATA instance
    // view can be requested.
    computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();

    // GET with the USER_DATA instance view and confirm the created value round-trips.
    Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForCreate, response.getValue().userData());

    // Was: vm.update().withUserData(userDataForUpdate).apply() — not real-time.
    // Update through the service client instead so the next GET observes the new value.
    computeManager.serviceClient().getVirtualMachines()
        .update(rgName, vmName, new VirtualMachineUpdateInner().withUserData(userDataForUpdate));

    // Re-fetch and confirm the updated value is visible.
    response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForUpdate, response.getValue().userData());
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
    Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());
    // Deleting the VM should cascade-delete DELETE-marked resources; only the
    // network and the public IP (created without DELETE here) remain.
    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(2,
        computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
    PublicIpAddress publicIpAddress =
        computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());
    // Second VM: secondary NIC also marked DELETE.
    String secondaryNicName = generateRandomResourceName("nic", 10);
    Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this
        .networkManager
        .networkInterfaces()
        .define(secondaryNicName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic();
    VirtualMachine vm2 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // only the network remains after delete
    Assertions.assertEquals(1,
        computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
    // Third VM: no delete options specified — everything defaults to DETACH
    // and therefore survives VM deletion.
    secondaryNetworkInterfaceCreatable =
        this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();
    VirtualMachine vm3 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withNewDataDisk(computeManager.disks()
            .define("datadisk2")
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withData()
            .withSizeInGB(10))
        .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());
    computeManager.virtualMachines().deleteById(vm3.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // disks and NICs detached, not deleted
    Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(2,
        computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
}

/**
 * Verifies that DeleteOptions set at create time also apply to disks added
 * later via update(), and that update() can set its own default.
 */
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    Region region = Region.US_WEST3;
    Network network = this
        .networkManager
        .networks()
        .define("network1")
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("default", "10.0.0.0/24")
        .create();
    // Create with DELETE defaults, then add a data disk via update.
    VirtualMachine vm1 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    vm1.update()
        .withNewDataDisk(10)
        .apply();
    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // data disk added in update() defaulted to DETACH, so one disk remains
    Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.disks().deleteById(disk.id());
    VirtualMachine vm2 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    // DELETE default set in update() applies only to the disk added there.
    vm2.update()
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .apply();
    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}

/**
 * Verifies enabling/disabling hibernation. Method continues on the next chunk line.
 */
@Test
public void canHibernateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
        .enableHibernation()
        .create();
    Assertions.assertTrue(vm.isHibernationEnabled());
    // deallocate(true) requests hibernation; instance view should then show
    // the "HibernationState/Hibernated" status.
    vm.deallocate(true);
    InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
        .filter(status -> "HibernationState/Hibernated".equals(status.code()))
        .findFirst().orElse(null);
    Assertions.assertNotNull(hibernationStatus);
    vm.start();
    // hibernation can only be toggled while the VM is deallocated
    vm.deallocate();
    vm.update()
        .disableHibernation()
        .apply();
    Assertions.assertFalse(vm.isHibernationEnabled());
}

/**
 * Exercises the VM power lifecycle: redeploy, powerOff, start, restart,
 * deallocate — asserting the expected PowerState after each transition.
 */
@Test
public void canOperateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.redeploy();
    // powerOff(true) = skip shutdown (forced stop)
    vm.powerOff(true);
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.STOPPED, vm.powerState());
    vm.start();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.restart();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());
    vm.deallocate();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
}

/**
 * Creates a VM with an ephemeral OS disk (cache placement) and verifies its
 * properties, data-disk updates, and that deallocate is rejected (ephemeral
 * OS disks do not support deallocation).
 */
@Test
public void canCreateVirtualMachineWithEphemeralOSDisk() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
        .withEphemeralOSDisk()
        .withPlacement(DiffDiskPlacement.CACHE_DISK)
        .withNewDataDisk(1, 1, CachingTypes.READ_WRITE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertNull(vm.osDiskDiskEncryptionSetId());
    Assertions.assertTrue(vm.osDiskSize() > 0);
    // ephemeral OS disks are always DELETE + READ_ONLY caching
    Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE);
    Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY);
    Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks()));
    Assertions.assertTrue(vm.isOSDiskEphemeral());
    Assertions.assertNotNull(vm.osDiskId());
    String osDiskId = vm.osDiskId();
    // swap out data disk at LUN 1, add another at LUN 2
    vm.update()
        .withoutDataDisk(1)
        .withNewDataDisk(1, 2, CachingTypes.NONE)
        .withNewDataDisk(1)
        .apply();
    Assertions.assertEquals(vm.dataDisks().size(), 2);
    vm.powerOff();
    vm.start();
    vm.refresh();
    // OS disk id is stable across power cycles
    Assertions.assertEquals(osDiskId, vm.osDiskId());
    // deallocate is not supported for ephemeral OS disks
    Assertions.assertThrows(Exception.class, vm::deallocate);
}

/**
 * Verifies attaching a regular VM to an existing flexible VMSS, and that
 * attach fails for unmanaged-disk VMs and for uniform VMSS. Method continues
 * on the next chunk line.
 */
@Test
public void canCreateVirtualMachineWithExistingScaleSet() throws Exception {
    final String vmssName = generateRandomResourceName("vmss", 10);
    Network network = this
        .networkManager
        .networks()
        .define("vmssvnet")
        .withRegion(region.name())
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/28")
        .withSubnet("subnet1", "10.0.0.0/28")
        .create();
    ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName);
    LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1",
        LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);
    VirtualMachineScaleSet flexibleVMSS = this.computeManager
        .virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2)
        .withExistingPrimaryNetworkSubnet(network, "subnet1")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withCapacity(1)
        .withUpgradeMode(UpgradeMode.AUTOMATIC)
        .create();
    // A regular VM attached to the flexible VMSS should raise its capacity to 2.
    String regularVMName = generateRandomResourceName("vm", 10);
    final String pipDnsLabel = generateRandomResourceName("pip", 10);
    VirtualMachine regularVM = this.computeManager
        .virtualMachines()
        .define(regularVMName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withNewPrimaryPublicIPAddress(pipDnsLabel)
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("jvuser2")
        .withSsh(sshPublicKey())
        .withExistingVirtualMachineScaleSet(flexibleVMSS)
        .create();
    flexibleVMSS.refresh();
    Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId());
    Assertions.assertEquals(2, flexibleVMSS.capacity());
    regularVM.deallocate();
    Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED);
    // Deleting the attached VM should drop capacity back to 1.
    this.computeManager
        .virtualMachines().deleteById(regularVM.id());
    flexibleVMSS.refresh();
    Assertions.assertEquals(flexibleVMSS.capacity(), 1);
    // A VM with unmanaged disks cannot join a flexible VMSS.
    final String storageAccountName = generateRandomResourceName("stg", 17);
    Assertions.assertThrows(
        ApiErrorException.class,
        () -> computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS")
            .withRootUsername("jvuser3")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withNewStorageAccount(storageAccountName)
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withExistingVirtualMachineScaleSet(flexibleVMSS)
            .create()
    );
    // Uniform-orchestration VMSS: attaching a standalone VM must fail.
    final String vmssName2 =
        generateRandomResourceName("vmss", 10);
    Network network2 = this
        .networkManager
        .networks()
        .define("vmssvnet2")
        .withRegion(region.name())
        .withExistingResourceGroup(rgName)
        .withAddressSpace("192.168.0.0/28")
        .withSubnet("subnet2", "192.168.0.0/28")
        .create();
    LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2",
        LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);
    VirtualMachineScaleSet uniformVMSS = this.computeManager
        .virtualMachineScaleSets()
        .define(vmssName2)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
        .withExistingPrimaryNetworkSubnet(network2, "subnet2")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser4")
        .withSsh(sshPublicKey())
        .withCapacity(1)
        .create();
    Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null));
    String regularVMName2 = generateRandomResourceName("vm", 10);
    Assertions.assertThrows(
        ApiErrorException.class,
        () -> this.computeManager
            .virtualMachines()
            .define(regularVMName2)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("jvuser5")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(uniformVMSS)
            .create()
    );
}

/**
 * Swaps the OS disk of a running VM with a customer-key-encrypted disk from
 * another VM, then swaps back. Skipped in playback. Method continues on the
 * next chunk line.
 */
@Test
@DoNotRecord(skipInPlayback = true)
public void canSwapOSDiskWithManagedDisk() {
    String storageAccountName = generateRandomResourceName("sa", 15);
    StorageAccount storageAccount = this.storageManager
        .storageAccounts()
        .define(storageAccountName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .create();
    String vm1Name = generateRandomResourceName("vm", 15);
    VirtualMachine vm1 = this.computeManager
        .virtualMachines()
        .define(vm1Name)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10, 1, CachingTypes.READ_WRITE)
        // DETACH so the OS disk survives when we later swap it out
        .withOSDiskDeleteOptions(DeleteOptions.DETACH)
        .withExistingStorageAccount(storageAccount)
        .create();
    // vm1's OS disk uses the default platform-managed key.
    Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId());
    Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type());
    // Build a disk-encryption-set backed by a new Key Vault RSA key.
    String vaultName = generateRandomResourceName("vault", 15);
    Vault vault = this.keyVaultManager
        .vaults()
        .define(vaultName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .defineAccessPolicy()
            .forServicePrincipal(clientIdFromFile())
            .allowKeyPermissions(KeyPermissions.CREATE)
            .attach()
        .create();
    String keyName = generateRandomResourceName("key", 15);
    Key key = vault.keys()
        .define(keyName)
        .withKeyTypeToCreate(KeyType.RSA)
        .withKeySize(4096)
        .create();
    String desName = generateRandomResourceName("des", 15);
    DiskEncryptionSet des = this.computeManager.diskEncryptionSets()
        .define(desName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY)
        .withExistingKeyVault(vault.id())
        .withExistingKey(key.id())
        .withSystemAssignedManagedServiceIdentity()
        .create();
    // Grant the DES identity key access; purge protection is required for DES.
    vault.update()
        .defineAccessPolicy()
            .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId())
            .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY)
            .attach()
        .withPurgeProtectionEnabled()
        .apply();
    // vm2 exists only to produce a customer-key-encrypted OS disk.
    String vm2Name = generateRandomResourceName("vm", 15);
    VirtualMachine vm2 = this.computeManager.virtualMachines()
        .define(vm2Name)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withOSDiskDiskEncryptionSet(des.id())
        .withOSDiskDeleteOptions(DeleteOptions.DETACH)
        .create();
    String vm2OSDiskId = vm2.osDiskId();
    // Delete vm2; its DETACH-ed OS disk remains and is customer-key encrypted.
    this.computeManager.virtualMachines().deleteById(vm2.id());
    Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId);
    Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type());
    // Swap vm1's OS disk for vm2's (VM must be deallocated to swap).
    vm1.deallocate();
    vm1.update()
        .withOSDisk(vm2OSDiskId)
        .apply();
    vm1.start();
    vm1.refresh();
    Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId);
    Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()));
    // Swap back to the original disk and verify the DES reference is cleared.
    vm1.deallocate();
    vm1.update()
        .withOSDisk(vm1OSDisk)
        .apply();
    vm1.start();
    vm1.refresh();
    Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id());
    Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet());
}

/**
 * Creates a Trusted Launch VM with secure boot + vTPM and verifies both flags
 * can be disabled via update. Method continues on the next chunk line.
 */
@Test
public void canCRUDTrustedLaunchVM() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        // Gen2 image is required for trusted launch
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withTrustedLaunch()
        .withSecureBoot()
        .withVTpm()
        .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
    Assertions.assertTrue(vm.isSecureBootEnabled());
    Assertions.assertTrue(vm.isVTpmEnabled());
    // Disable both flags, then restart so the change takes effect.
    vm.update()
        .withoutSecureBoot()
        .withoutVTpm()
        .applyAsync()
        .flatMap(VirtualMachine::restartAsync)
        .block();
    ResourceManagerUtils.sleep(Duration.ofMinutes(1));
    // Re-fetch and confirm flags are off while security type is unchanged.
    vm = computeManager.virtualMachines().getById(vm.id());
    Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
    Assertions.assertFalse(vm.isSecureBootEnabled());
    Assertions.assertFalse(vm.isVTpmEnabled());
    computeManager.virtualMachines().deleteById(vm.id());
}

/**
 * Verifies DeleteOptions can be changed after creation, for the OS disk,
 * data disks, the primary NIC, and secondary NICs (individually and in bulk).
 */
@Test
public void canUpdateDeleteOptions() {
    String networkName = generateRandomResourceName("network", 15);
    String nicName = generateRandomResourceName("nic", 15);
    String nicName2 = generateRandomResourceName("nic", 15);
    Network network = this
        .networkManager
        .networks()
        .define(networkName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.1.0/24")
        .withSubnet("subnet1", "10.0.1.0/28")
        .withSubnet("subnet2", "10.0.1.16/28")
        .create();
    // Everything created with DELETE semantics.
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("subnet1")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE))
        .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2)
        .withNewSecondaryNetworkInterface(this
            .networkManager
            .networkInterfaces()
            .define(nicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet1")
            .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
    Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
        DeleteOptions.DELETE.equals(disk.deleteOptions())));
    // Flip OS disk, primary NIC and data disk at LUN 1 to DETACH; the
    // secondary NIC keeps DELETE.
    vm.update()
        .withOsDiskDeleteOptions(DeleteOptions.DETACH)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH)
        .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1)
        .apply();
    Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions());
    Assertions.assertTrue(vm.networkInterfaceIds().stream()
        .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId ->
            DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
    Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
        DeleteOptions.DETACH.equals(disk.deleteOptions())));
    NetworkInterface secondaryNic2 = this
        .networkManager
        .networkInterfaces()
        .define(nicName2)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("subnet2")
        .withPrimaryPrivateIPAddressDynamic()
        .create();
    vm.powerOff();
    vm.deallocate();
    // Attach a new DETACH-marked data disk and a second secondary NIC.
    vm.update()
        .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH))
        .withExistingSecondaryNetworkInterface(secondaryNic2)
        .apply();
    // Bulk-update: set everything back to DELETE.
    vm.update()
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withDataDisksDeleteOptions(DeleteOptions.DELETE,
            new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0]))
        .withNetworkInterfacesDeleteOptions(
            DeleteOptions.DELETE,
            vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new))
        .apply();
    Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
    Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId ->
        DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
    Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
        DeleteOptions.DELETE.equals(disk.deleteOptions())));
}

/**
 * Verifies listByVirtualMachineScaleSetId returns only VMs attached to the
 * given flexible VMSS. Method continues on the next chunk line.
 */
@Test
public void testListVmByVmssId() {
    String vmssName =
        generateRandomResourceName("vmss", 15);
    String vmName = generateRandomResourceName("vm", 15);
    String vmName2 = generateRandomResourceName("vm", 15);
    VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .create();
    // empty VMSS lists no VMs
    Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
    // vm joins the VMSS; vm2 is standalone in the same resource group
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withExistingVirtualMachineScaleSet(vmss)
        .create();
    Assertions.assertNotNull(vm.virtualMachineScaleSetId());
    VirtualMachine vm2 = computeManager.virtualMachines()
        .define(vmName2)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.16/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .create();
    Assertions.assertNull(vm2.virtualMachineScaleSetId());
    // only the attached VM is listed by VMSS id; both appear in the RG listing
    Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
    Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id()));
    Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count());
}

/**
 * Exercises nextLink paging of listByVirtualMachineScaleSetId with a
 * 70-instance VMSS. Disabled: the assertion depends on the backend page size.
 */
@Test
@DoNotRecord(skipInPlayback = true)
@Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. Backend pageSize may change, so we don't want to assert that.")
public void testListByVmssIdNextLink() throws Exception {
    String vmssName = generateRandomResourceName("vmss", 15);
    String vnetName = generateRandomResourceName("vnet", 15);
    String vmName = generateRandomResourceName("vm", 15);
    int vmssCapacity = 70;
    // A standalone VM in the same RG; it must NOT show up in the VMSS listing.
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .create();
    Network network = networkManager.networks().define(vnetName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("subnet1", "10.0.0.0/24")
        .create();
    LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region,
        this.resourceManager.resourceGroups().getByName(rgName), "1",
        LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);
    VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
        .withExistingPrimaryNetworkSubnet(network, "subnet1")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withCapacity(vmssCapacity)
        .create();
    // Walk all pages: every page must be a 200 and the total must equal capacity.
    PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id());
    Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage();
    int pageCount = 0;
    for (PagedResponse<VirtualMachine> response : vmIterable) {
        pageCount++;
        Assertions.assertEquals(200, response.getStatusCode());
    }
    Assertions.assertEquals(vmssCapacity,
        vmPaged.stream().count());
    // NOTE(review): assumes a backend page size of 50 (70 VMs => 2 pages);
    // this is the assertion the @Disabled message refers to.
    Assertions.assertEquals(2, pageCount);
}

/**
 * Builds (but does not create) a batch of VM creatables — each with its own
 * network and public IP, all sharing one resource group and storage account —
 * and records the creatable keys so tests can resolve the created resources.
 */
private CreatablesInfo prepareCreatableVirtualMachines(
    Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
    Creatable<ResourceGroup> resourceGroupCreatable =
        resourceManager.resourceGroups().define(rgName).withRegion(region);
    Creatable<StorageAccount> storageAccountCreatable = storageManager
        .storageAccounts()
        .define(generateRandomResourceName("stg", 20))
        .withRegion(region)
        .withNewResourceGroup(resourceGroupCreatable);
    List<String> networkCreatableKeys = new ArrayList<>();
    List<String> publicIpCreatableKeys = new ArrayList<>();
    List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
    for (int i = 0; i < vmCount; i++) {
        Creatable<Network> networkCreatable = networkManager
            .networks()
            .define(String.format("%s-%d", networkNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable)
            .withAddressSpace("10.0.0.0/28");
        networkCreatableKeys.add(networkCreatable.key());
        Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager
            .publicIpAddresses()
            .define(String.format("%s-%d", publicIpNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);
        publicIpCreatableKeys.add(publicIPAddressCreatable.key());
        Creatable<VirtualMachine> virtualMachineCreatable = computeManager
            .virtualMachines()
            .define(String.format("%s-%d", vmNamePrefix, i))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable)
            .withNewPrimaryNetwork(networkCreatable)
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("tirekicker")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks()
            .withNewStorageAccount(storageAccountCreatable);
        virtualMachineCreatables.add(virtualMachineCreatable);
    }
    CreatablesInfo creatablesInfo = new CreatablesInfo();
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
/** Live/playback tests for virtual-machine CRUD and related operations. */
class VirtualMachineOperationsTests extends ComputeManagementTest {
    // Resource-group names are (re)generated per test run in initializeClients().
    private String rgName = "";
    private String rgName2 = "";
    private final Region region = Region.US_EAST;
    private final Region regionProxPlacementGroup = Region.US_WEST;
    private final Region regionProxPlacementGroup2 = Region.US_EAST;
    private final String vmName = "javavm";
    private final String proxGroupName = "testproxgroup1";
    private final String proxGroupName2 = "testproxgroup2";
    private final String availabilitySetName = "availset1";
    private final String availabilitySetName2 = "availset2";
    private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;

    @Override
    protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
        rgName = generateRandomResourceName("javacsmrg", 15);
        rgName2 = generateRandomResourceName("javacsmrg2", 15);
        super.initializeClients(httpPipeline, profile);
    }

    @Override
    protected void cleanUpResources() {
        // Tests that must skip cleanup (e.g. nothing was provisioned) set rgName to null.
        if (rgName != null) {
            resourceManager.resourceGroups().beginDeleteByName(rgName);
        }
        // NOTE(review): rgName2 is allocated above and a resource group is created under it by
        // cannotUpdateProximityPlacementGroupForVirtualMachine, but it is never deleted here —
        // likely a resource leak. TODO: confirm and delete rgName2 as well.
    }

    // FIX: removed a duplicated @Test annotation. JUnit 5's @Test is not @Repeatable, so
    // "@Test @Test" does not compile.
    /** Creates a VM wired to a new network/subnet guarded by an NSG, then verifies the wiring. */
    @Test
    public void canCreateVirtualMachineWithNetworking() throws Exception {
        NetworkSecurityGroup nsg =
            this
                .networkManager
                .networkSecurityGroups()
                .define("nsg")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .defineRule("rule1")
                .allowInbound()
                .fromAnyAddress()
                .fromPort(80)
                .toAnyAddress()
                .toPort(80)
                .withProtocol(SecurityRuleProtocol.TCP)
                .attach()
                .create();
        // Network whose only subnet is associated with the NSG created above.
        Creatable<Network> networkDefinition =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/28")
                .defineSubnet("subnet1")
                .withAddressPrefix("10.0.0.0/29")
                .withExistingNetworkSecurityGroup(nsg)
                .attach();
        VirtualMachine vm =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork(networkDefinition)
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = 
this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
We copy some parameters from `innerModel` into the update parameter here: https://github.com/Azure/azure-sdk-for-java/blob/04f9b83c351f6d817da6c73308dc3a37c64d03db/sdk/resourcemanager/azure-resourcemanager-compute/src/main/java/com/azure/resourcemanager/compute/implementation/VirtualMachineImpl.java#L2764-L2781 We probably need to add an additional `userData` copy in that block as well.
/**
 * Verifies that {@code userData} round-trips through both the create flow and the
 * fluent update flow.
 *
 * <p>Fix over the previous version: the update previously bypassed the fluent API by
 * calling {@code serviceClient().getVirtualMachines().update(...)} directly, which left
 * the {@code vm} local unused and the {@code VirtualMachine.update()} userData path
 * (including its innerModel-to-update parameter copying) untested. The update now goes
 * through {@code vm.update().withUserData(...).apply()}.
 *
 * <p>NOTE(review): userData is only populated on GET when the USER_DATA instance view is
 * requested explicitly — presumably a service-side behavior; confirmed here by passing
 * {@code InstanceViewTypes.USER_DATA}.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // userData payloads must be base64-encoded strings; these are arbitrary opaque values.
    String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    VirtualMachine vm = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();

    // Fetch with the USER_DATA instance view so the service returns the userData field.
    Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForCreate, response.getValue().userData());

    // Update userData through the fluent API (exercises the innerModel parameter copying).
    vm.update().withUserData(userDataForUpdate).apply();

    response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForUpdate, response.getValue().userData());
}
computeManager.serviceClient().getVirtualMachines()
/**
 * Creates a VM with an initial {@code userData} value, confirms it is returned by GET,
 * then changes it via the fluent update API and confirms the new value is returned.
 *
 * <p>NOTE(review): the GETs pass {@code InstanceViewTypes.USER_DATA} — userData appears
 * to be returned only when that instance view is requested explicitly.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Arbitrary base64-encoded payloads used as the before/after userData values.
    final String initialUserData = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    final String updatedUserData = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    VirtualMachine vm = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(initialUserData)
        .create();

    // Verify the value supplied at creation time round-trips through GET.
    Response<VirtualMachineInner> getResponse = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(initialUserData, getResponse.getValue().userData());

    // Change userData through the fluent update flow.
    vm.update().withUserData(updatedUserData).apply();

    // Verify the updated value is now what GET returns.
    getResponse = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(updatedUserData, getResponse.getValue().userData());
}
/**
 * Integration tests for virtual machine CRUD operations via {@code ComputeManager}.
 * Resource-group names are regenerated per test run in {@link #initializeClients}.
 */
class VirtualMachineOperationsTests extends ComputeManagementTest {
    private String rgName = "";
    // Second resource group, used by the proximity-placement-group tests.
    private String rgName2 = "";
    private final Region region = Region.US_EAST;
    private final Region regionProxPlacementGroup = Region.US_WEST;
    private final Region regionProxPlacementGroup2 = Region.US_EAST;
    private final String vmName = "javavm";
    private final String proxGroupName = "testproxgroup1";
    private final String proxGroupName2 = "testproxgroup2";
    private final String availabilitySetName = "availset1";
    private final String availabilitySetName2 = "availset2";
    private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;

    @Override
    protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
        rgName = generateRandomResourceName("javacsmrg", 15);
        rgName2 = generateRandomResourceName("javacsmrg2", 15);
        super.initializeClients(httpPipeline, profile);
    }

    @Override
    protected void cleanUpResources() {
        // Fire-and-forget deletes; a test may null out rgName to signal "nothing created".
        if (rgName != null) {
            resourceManager.resourceGroups().beginDeleteByName(rgName);
        }
        // FIX(review): rgName2 is allocated in initializeClients and used by the
        // proximity-placement-group test, but was never cleaned up (resource leak).
        if (rgName2 != null) {
            resourceManager.resourceGroups().beginDeleteByName(rgName2);
        }
    }

    // FIX(review): annotation was duplicated ("@Test @Test") in the original, which is a
    // compile error because JUnit 5's @Test is not @Repeatable.
    @Test
    public void canCreateVirtualMachineWithNetworking() throws Exception {
        // NSG with a single inbound allow rule on port 80.
        NetworkSecurityGroup nsg =
            this.networkManager
                .networkSecurityGroups()
                .define("nsg")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .defineRule("rule1")
                    .allowInbound()
                    .fromAnyAddress()
                    .fromPort(80)
                    .toAnyAddress()
                    .toPort(80)
                    .withProtocol(SecurityRuleProtocol.TCP)
                    .attach()
                .create();

        // Network whose subnet is associated with the NSG above.
        Creatable<Network> networkDefinition =
            this.networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/28")
                .defineSubnet("subnet1")
                    .withAddressPrefix("10.0.0.0/29")
                    .withExistingNetworkSecurityGroup(nsg)
                    .attach();

        VirtualMachine vm =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork(networkDefinition)
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
// NOTE(review): this span begins INSIDE a Spot-VM test whose opening lines are outside this
// view; the leading fragment is reproduced verbatim. The tail verifies that after a simulated
// eviction the OS disk transitions from ATTACHED to RESERVED.
.withRegion(region)
    .withNewResourceGroup(rgName)
    .withNewPrimaryNetwork("10.0.0.0/28")
    .withPrimaryPrivateIPAddressDynamic()
    .withoutPrimaryPublicIPAddress()
    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
    .withRootUsername("firstuser")
    .withSsh(sshPublicKey())
    .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
    .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
    .create();

// While the VM is running, OS disk properties are populated and the disk is ATTACHED.
Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
Assertions.assertNotNull(disk);
Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());

// Simulate a Spot eviction, then poll every 5 minutes (up to 30) until the platform
// deallocates the VM.
virtualMachine.simulateEviction();
boolean deallocated = false;
int pollIntervalInMinutes = 5;
for (int i = 0; i < 30; i += pollIntervalInMinutes) {
    ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
    virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
        deallocated = true;
        break;
    }
}
Assertions.assertTrue(deallocated);

// After eviction: OS disk properties are no longer reported on the VM model and the
// disk state becomes RESERVED.
virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
Assertions.assertNotNull(virtualMachine);
Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
Assertions.assertEquals(0, virtualMachine.osDiskSize());
disk = computeManager.disks().getById(virtualMachine.osDiskId());
Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
}

// Verifies force-delete (deleteById with forceDeletion=true): the VM is gone (GET -> 404)
// but its network interface is left behind.
@Test
public void canForceDeleteVirtualMachine() {
    // create vm
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .create();
    VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
    Assertions.assertNotNull(virtualMachine);
    Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
    String nicId = virtualMachine.primaryNetworkInterfaceId();

    // force delete the VM
    computeManager.virtualMachines().deleteById(virtualMachine.id(), true);
    try {
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
    } catch (ManagementException ex) {
        // expected: the VM no longer exists
        virtualMachine = null;
        Assertions.assertEquals(404, ex.getResponse().getStatusCode());
    }
    Assertions.assertNull(virtualMachine);

    // force-delete does not cascade to the NIC
    NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
    Assertions.assertNotNull(nic);
}

// Verifies DeleteOptions set at creation time: DELETE cascades resource deletion with the VM,
// while the default (DETACH) leaves disks/NICs behind.
@Test
public void canCreateVirtualMachineWithDeleteOption() throws Exception {
    Region region = Region.US_WEST3;
    final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
    Network network = this
        .networkManager
        .networks()
        .define("network1")
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("default", "10.0.0.0/24")
        .create();

    // vm1: OS disk, data disks and primary NIC all DeleteOptions.DELETE
    VirtualMachine vm1 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withNewDataDisk(computeManager.disks()
            .define("datadisk2")
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withData()
            .withSizeInGB(10))
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());
    Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // only the network and the (not-cascaded) public IP remain in the resource group
    Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());
    PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

    String secondaryNicName = generateRandomResourceName("nic", 10);
    Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this
        .networkManager
        .networkInterfaces()
        .define(secondaryNicName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic();

    // vm2: secondary NIC also marked DeleteOptions.DELETE
    VirtualMachine vm2 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();

    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // only the network remains
    Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

    secondaryNetworkInterfaceCreatable = this
        .networkManager
        .networkInterfaces()
        .define(secondaryNicName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic();

    // vm3: no explicit DeleteOptions -> everything defaults to DETACH
    VirtualMachine vm3 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withNewDataDisk(computeManager.disks()
            .define("datadisk2")
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withData()
            .withSizeInGB(10))
        .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

    computeManager.virtualMachines().deleteById(vm3.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    // DETACH leaves the OS disk + 2 data disks and both NICs behind
    Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
}

// Verifies DeleteOptions behavior for data disks added via update(): disks added during an
// update do not inherit the create-time default unless the default is (re)stated in the update.
@Test
public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
    Region region = Region.US_WEST3;
    Network network = this
        .networkManager
        .networks()
        .define("network1")
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("default", "10.0.0.0/24")
        .create();

    VirtualMachine vm1 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();

    // disk added via update (no default restated) is not cascaded on delete
    vm1.update()
        .withNewDataDisk(10)
        .apply();

    computeManager.virtualMachines().deleteById(vm1.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
    Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
    computeManager.disks().deleteById(disk.id());

    VirtualMachine vm2 = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("default")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("testuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();

    // default stated in the update applies only to the update-added disk; the create-time
    // disk keeps DETACH, so both disks survive VM deletion
    vm2.update()
        .withNewDataDisk(10)
        .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
        .apply();

    computeManager.virtualMachines().deleteById(vm2.id());
    ResourceManagerUtils.sleep(Duration.ofSeconds(10));
    Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
}

// Verifies hibernation: deallocate(hibernate=true) reports HibernationState/Hibernated, and
// hibernation can be disabled via update() on a deallocated VM.
@Test
public void canHibernateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion("eastus2euap")
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
        .enableHibernation()
        .create();
    Assertions.assertTrue(vm.isHibernationEnabled());

    // hibernate (deallocate with hibernate=true) and check instance-view status
    vm.deallocate(true);
    InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
        .filter(status -> "HibernationState/Hibernated".equals(status.code()))
        .findFirst().orElse(null);
    Assertions.assertNotNull(hibernationStatus);

    vm.start();
    vm.deallocate();

    vm.update()
        .disableHibernation()
        .apply();
    Assertions.assertFalse(vm.isHibernationEnabled());
}

// Exercises the VM power lifecycle: redeploy, powerOff, start, restart, deallocate, checking
// the expected PowerState after each transition.
@Test
public void canOperateVirtualMachine() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
        .create();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.redeploy();

    vm.powerOff(true);
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.STOPPED, vm.powerState());

    vm.start();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.restart();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

    vm.deallocate();
    vm.refreshInstanceView();
    Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
}

// Verifies an ephemeral OS disk VM (cache-disk placement).
// NOTE(review): this method continues past this span; the builder chain is completed by the
// immediately following lines of the file.
@Test
public void canCreateVirtualMachineWithEphemeralOSDisk() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
// (continuation of canCreateVirtualMachineWithEphemeralOSDisk — chain opened on the
// preceding lines)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
        .withEphemeralOSDisk()
        .withPlacement(DiffDiskPlacement.CACHE_DISK)
        .withNewDataDisk(1, 1, CachingTypes.READ_WRITE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .create();

    // ephemeral OS disk characteristics: no DES, DELETE on VM delete, READ_ONLY caching
    Assertions.assertNull(vm.osDiskDiskEncryptionSetId());
    Assertions.assertTrue(vm.osDiskSize() > 0);
    Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE);
    Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY);
    Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks()));
    Assertions.assertTrue(vm.isOSDiskEphemeral());
    Assertions.assertNotNull(vm.osDiskId());
    String osDiskId = vm.osDiskId();

    // data disks can be swapped while the ephemeral OS disk stays the same
    vm.update()
        .withoutDataDisk(1)
        .withNewDataDisk(1, 2, CachingTypes.NONE)
        .withNewDataDisk(1)
        .apply();
    Assertions.assertEquals(vm.dataDisks().size(), 2);

    vm.powerOff();
    vm.start();
    vm.refresh();
    Assertions.assertEquals(osDiskId, vm.osDiskId());
    // deallocate is not supported for ephemeral OS disk VMs
    Assertions.assertThrows(Exception.class, vm::deallocate);
}

// Verifies attaching a regular VM to a flexible-orchestration VMSS (capacity grows/shrinks),
// and that attaching fails for unmanaged-disk VMs and for uniform-orchestration VMSS.
@Test
public void canCreateVirtualMachineWithExistingScaleSet() throws Exception {
    final String vmssName = generateRandomResourceName("vmss", 10);
    Network network = this
        .networkManager
        .networks()
        .define("vmssvnet")
        .withRegion(region.name())
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/28")
        .withSubnet("subnet1", "10.0.0.0/28")
        .create();
    ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName);
    LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

    VirtualMachineScaleSet flexibleVMSS = this.computeManager
        .virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2)
        .withExistingPrimaryNetworkSubnet(network, "subnet1")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withCapacity(1)
        .withUpgradeMode(UpgradeMode.AUTOMATIC)
        .create();

    String regularVMName = generateRandomResourceName("vm", 10);
    final String pipDnsLabel = generateRandomResourceName("pip", 10);
    VirtualMachine regularVM = this.computeManager
        .virtualMachines()
        .define(regularVMName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withNewPrimaryPublicIPAddress(pipDnsLabel)
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
        .withRootUsername("jvuser2")
        .withSsh(sshPublicKey())
        .withExistingVirtualMachineScaleSet(flexibleVMSS)
        .create();

    // attaching the VM grows the flexible VMSS capacity to 2
    flexibleVMSS.refresh();
    Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId());
    Assertions.assertEquals(2, flexibleVMSS.capacity());

    regularVM.deallocate();
    Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED);

    // deleting the VM shrinks the capacity back to 1
    this.computeManager
        .virtualMachines().deleteById(regularVM.id());
    flexibleVMSS.refresh();
    Assertions.assertEquals(flexibleVMSS.capacity(), 1);

    // unmanaged-disk VMs cannot join a flexible VMSS
    final String storageAccountName = generateRandomResourceName("stg", 17);
    Assertions.assertThrows(
        ApiErrorException.class,
        () -> computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS")
            .withRootUsername("jvuser3")
            .withSsh(sshPublicKey())
            .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withNewStorageAccount(storageAccountName)
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withExistingVirtualMachineScaleSet(flexibleVMSS)
            .create()
    );

    final String vmssName2 = generateRandomResourceName("vmss", 10);
    Network network2 = this
        .networkManager
        .networks()
        .define("vmssvnet2")
        .withRegion(region.name())
        .withExistingResourceGroup(rgName)
        .withAddressSpace("192.168.0.0/28")
        .withSubnet("subnet2", "192.168.0.0/28")
        .create();
    LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

    VirtualMachineScaleSet uniformVMSS = this.computeManager
        .virtualMachineScaleSets()
        .define(vmssName2)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
        .withExistingPrimaryNetworkSubnet(network2, "subnet2")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser4")
        .withSsh(sshPublicKey())
        .withCapacity(1)
        .create();
    Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null));

    // regular VMs cannot be attached to a uniform-orchestration VMSS
    String regularVMName2 = generateRandomResourceName("vm", 10);
    Assertions.assertThrows(
        ApiErrorException.class,
        () -> this.computeManager
            .virtualMachines()
            .define(regularVMName2)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("jvuser5")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(uniformVMSS)
            .create()
    );
}

// Verifies swapping a VM's OS disk with another managed disk (one encrypted with a
// customer-managed key via a disk encryption set), then swapping back.
@Test
@DoNotRecord(skipInPlayback = true)
public void canSwapOSDiskWithManagedDisk() {
    String storageAccountName = generateRandomResourceName("sa", 15);
    StorageAccount storageAccount = this.storageManager
        .storageAccounts()
        .define(storageAccountName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .create();

    String vm1Name = generateRandomResourceName("vm", 15);
    VirtualMachine vm1 = this.computeManager
        .virtualMachines()
        .define(vm1Name)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10, 1, CachingTypes.READ_WRITE)
        .withOSDiskDeleteOptions(DeleteOptions.DETACH)
        .withExistingStorageAccount(storageAccount)
        .create();
    Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId());
    // default platform-managed key encryption
    Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type());

    // set up Key Vault + key + disk encryption set for customer-managed key encryption
    String vaultName = generateRandomResourceName("vault", 15);
    Vault vault = this.keyVaultManager
        .vaults()
        .define(vaultName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .defineAccessPolicy()
        .forServicePrincipal(clientIdFromFile())
        .allowKeyPermissions(KeyPermissions.CREATE)
        .attach()
        .create();
    String keyName = generateRandomResourceName("key", 15);
    Key key = vault.keys()
        .define(keyName)
        .withKeyTypeToCreate(KeyType.RSA)
        .withKeySize(4096)
        .create();
    String desName = generateRandomResourceName("des", 15);
    DiskEncryptionSet des = this.computeManager.diskEncryptionSets()
        .define(desName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY)
        .withExistingKeyVault(vault.id())
        .withExistingKey(key.id())
        .withSystemAssignedManagedServiceIdentity()
        .create();
    // grant the DES identity access to the key
    vault.update()
        .defineAccessPolicy()
        .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId())
        .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY)
        .attach()
        .withPurgeProtectionEnabled()
        .apply();

    // vm2 exists only to produce a CMK-encrypted OS disk (DETACH keeps it after VM delete)
    String vm2Name = generateRandomResourceName("vm", 15);
    VirtualMachine vm2 = this.computeManager.virtualMachines()
        .define(vm2Name)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withOSDiskDiskEncryptionSet(des.id())
        .withOSDiskDeleteOptions(DeleteOptions.DETACH)
        .create();
    String vm2OSDiskId = vm2.osDiskId();
    this.computeManager.virtualMachines().deleteById(vm2.id());
    Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId);
    Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type());

    // swap vm1's OS disk for vm2's CMK-encrypted disk (VM must be deallocated)
    vm1.deallocate();
    vm1.update()
        .withOSDisk(vm2OSDiskId)
        .apply();
    vm1.start();
    vm1.refresh();
    Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId);
    Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()));

    // swap back to the original disk
    vm1.deallocate();
    vm1.update()
        .withOSDisk(vm1OSDisk)
        .apply();
    vm1.start();
    vm1.refresh();
    Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id());
    Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet());
}

// Verifies Trusted Launch: secure boot + vTPM enabled at create, then disabled via update.
@Test
public void canCRUDTrustedLaunchVM() {
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(Region.US_WEST3)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withTrustedLaunch()
        .withSecureBoot()
        .withVTpm()
        .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
    Assertions.assertTrue(vm.isSecureBootEnabled());
    Assertions.assertTrue(vm.isVTpmEnabled());

    // disable secure boot + vTPM and restart so the change takes effect
    vm.update()
        .withoutSecureBoot()
        .withoutVTpm()
        .applyAsync()
        .flatMap(VirtualMachine::restartAsync)
        .block();
    ResourceManagerUtils.sleep(Duration.ofMinutes(1));

    vm = computeManager.virtualMachines().getById(vm.id());
    Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
    Assertions.assertFalse(vm.isSecureBootEnabled());
    Assertions.assertFalse(vm.isVTpmEnabled());

    computeManager.virtualMachines().deleteById(vm.id());
}

// Verifies changing DeleteOptions (OS disk, NICs, data disks) after creation via update().
// NOTE(review): this method continues past this span; the trailing lambda is completed by the
// immediately following lines of the file.
@Test
public void canUpdateDeleteOptions() {
    String networkName = generateRandomResourceName("network", 15);
    String nicName = generateRandomResourceName("nic", 15);
    String nicName2 = generateRandomResourceName("nic", 15);
    Network network = this
        .networkManager
        .networks()
        .define(networkName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withAddressSpace("10.0.1.0/24")
        .withSubnet("subnet1", "10.0.1.0/28")
        .withSubnet("subnet2", "10.0.1.16/28")
        .create();

    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("subnet1")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
        .withRootUsername("Foo12")
        .withSsh(sshPublicKey())
        .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE))
        .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2)
        .withNewSecondaryNetworkInterface(this
            .networkManager
            .networkInterfaces()
            .define(nicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet1")
            .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withOSDiskDeleteOptions(DeleteOptions.DELETE)
        .create();
    Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
    Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
// (continuation of canUpdateDeleteOptions — the assertTrue lambda opened on the preceding
// lines is completed here)
        DeleteOptions.DELETE.equals(disk.deleteOptions())));

    // flip OS disk, primary NIC and data disk 1 to DETACH via update()
    vm.update()
        .withOsDiskDeleteOptions(DeleteOptions.DETACH)
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH)
        .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1)
        .apply();
    Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions());
    Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions());
    // the secondary NIC was not updated, so it keeps DELETE
    Assertions.assertTrue(vm.networkInterfaceIds().stream()
        .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId ->
            DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
    Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
        DeleteOptions.DETACH.equals(disk.deleteOptions())));

    NetworkInterface secondaryNic2 = this
        .networkManager
        .networkInterfaces()
        .define(nicName2)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withExistingPrimaryNetwork(network)
        .withSubnet("subnet2")
        .withPrimaryPrivateIPAddressDynamic()
        .create();

    vm.powerOff();
    vm.deallocate();

    // add a new data disk (DETACH) and a second secondary NIC
    vm.update()
        .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH))
        .withExistingSecondaryNetworkInterface(secondaryNic2)
        .apply();

    // bulk-switch everything to DELETE: all data disks and all non-primary NICs
    vm.update()
        .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
        .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0]))
        .withNetworkInterfacesDeleteOptions(
            DeleteOptions.DELETE,
            vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new))
        .apply();
    Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
    Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId ->
        DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
    Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
        DeleteOptions.DELETE.equals(disk.deleteOptions())));
}

// Verifies listByVirtualMachineScaleSetId returns only VMs attached to the given flexible
// VMSS, not other VMs in the same resource group.
@Test
public void testListVmByVmssId() {
    String vmssName = generateRandomResourceName("vmss", 15);
    // NOTE(review): this local shadows the class field "vmName"
    String vmName = generateRandomResourceName("vm", 15);
    String vmName2 = generateRandomResourceName("vm", 15);
    VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .create();
    Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());

    // vm joins the VMSS; vm2 does not
    VirtualMachine vm = computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withExistingVirtualMachineScaleSet(vmss)
        .create();
    Assertions.assertNotNull(vm.virtualMachineScaleSetId());

    VirtualMachine vm2 = computeManager.virtualMachines()
        .define(vmName2)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.16/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .create();
    Assertions.assertNull(vm2.virtualMachineScaleSetId());

    // listing by VMSS id yields only the attached VM; listing by RG yields both
    Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
    Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id()));
    Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count());
}

// Verifies paging (nextLink handling) of listByVirtualMachineScaleSetId with a capacity large
// enough to require more than one page.
@Test
@DoNotRecord(skipInPlayback = true)
@Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. Backend pageSize may change, so we don't want to assert that.")
public void testListByVmssIdNextLink() throws Exception {
    String vmssName = generateRandomResourceName("vmss", 15);
    String vnetName = generateRandomResourceName("vnet", 15);
    String vmName = generateRandomResourceName("vm", 15);
    int vmssCapacity = 70;

    // a standalone VM in the same RG that must NOT appear in the VMSS listing
    computeManager.virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.1.0/24")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .create();

    Network network = networkManager.networks().define(vnetName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withAddressSpace("10.0.0.0/24")
        .withSubnet("subnet1", "10.0.0.0/24")
        .create();
    LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

    VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
        .define(vmssName)
        .withRegion(region)
        .withExistingResourceGroup(rgName)
        .withFlexibleOrchestrationMode()
        .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
        .withExistingPrimaryNetworkSubnet(network, "subnet1")
        .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
        .withoutPrimaryInternalLoadBalancer()
        .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
        .withRootUsername("jvuser")
        .withSsh(sshPublicKey())
        .withCapacity(vmssCapacity)
        .create();

    // walk every page and count them; all pages must come back 200
    PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id());
    Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage();
    int pageCount = 0;
    for (PagedResponse<VirtualMachine> response : vmIterable) {
        pageCount++;
        Assertions.assertEquals(200, response.getStatusCode());
    }
    Assertions.assertEquals(vmssCapacity, vmPaged.stream().count());
    // assumes a backend page size of 50 -> 70 VMs span 2 pages (the @Disabled reason)
    Assertions.assertEquals(2, pageCount);
}

// Builds (without creating) vmCount VM definitions, each with its own new network and public
// IP, all sharing one resource group and one storage account; returns the creatables plus the
// keys of the network/public-IP creatables for later lookup.
private CreatablesInfo prepareCreatableVirtualMachines(
    Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
    Creatable<ResourceGroup> resourceGroupCreatable =
        resourceManager.resourceGroups().define(rgName).withRegion(region);

    Creatable<StorageAccount> storageAccountCreatable =
        storageManager
            .storageAccounts()
            .define(generateRandomResourceName("stg", 20))
            .withRegion(region)
            .withNewResourceGroup(resourceGroupCreatable);

    List<String> networkCreatableKeys = new ArrayList<>();
    List<String> publicIpCreatableKeys = new ArrayList<>();
    List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
    for (int i = 0; i < vmCount; i++) {
        Creatable<Network> networkCreatable =
            networkManager
                .networks()
                .define(String.format("%s-%d", networkNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withAddressSpace("10.0.0.0/28");
        networkCreatableKeys.add(networkCreatable.key());

        Creatable<PublicIpAddress> publicIPAddressCreatable =
            networkManager
                .publicIpAddresses()
                .define(String.format("%s-%d", publicIpNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable);
        publicIpCreatableKeys.add(publicIPAddressCreatable.key());

        Creatable<VirtualMachine> virtualMachineCreatable =
            computeManager
                .virtualMachines()
                .define(String.format("%s-%d", vmNamePrefix, i))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable)
                .withNewPrimaryNetwork(networkCreatable)
                .withPrimaryPrivateIPAddressDynamic()
                .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("tirekicker")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks()
                .withNewStorageAccount(storageAccountCreatable);
        virtualMachineCreatables.add(virtualMachineCreatable);
    }
    CreatablesInfo creatablesInfo = new CreatablesInfo();
    creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
    creatablesInfo.networkCreatableKeys = networkCreatableKeys;
    creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
    return creatablesInfo;
}

// Simple holder for the creatables produced by prepareCreatableVirtualMachines.
class CreatablesInfo {
    private List<Creatable<VirtualMachine>> virtualMachineCreatables;
    List<String> networkCreatableKeys;
    List<String> publicIpCreatableKeys;
}
}
// Integration tests for virtual machine CRUD, networking, priority/billing and
// proximity-placement-group operations.
class VirtualMachineOperationsTests extends ComputeManagementTest {
    private String rgName = "";
    private String rgName2 = "";
    private final Region region = Region.US_EAST;
    private final Region regionProxPlacementGroup = Region.US_WEST;
    private final Region regionProxPlacementGroup2 = Region.US_EAST;
    private final String vmName = "javavm";
    private final String proxGroupName = "testproxgroup1";
    private final String proxGroupName2 = "testproxgroup2";
    private final String availabilitySetName = "availset1";
    private final String availabilitySetName2 = "availset2";
    private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;

    @Override
    protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
        rgName = generateRandomResourceName("javacsmrg", 15);
        rgName2 = generateRandomResourceName("javacsmrg2", 15);
        super.initializeClients(httpPipeline, profile);
    }

    @Override
    protected void cleanUpResources() {
        if (rgName != null) {
            resourceManager.resourceGroups().beginDeleteByName(rgName);
        }
        // Fix: rgName2 is used by cannotUpdateProximityPlacementGroupForVirtualMachine but was
        // never cleaned up, leaking that resource group. Best-effort delete — most tests never
        // create it, so a failure here must not mask the primary cleanup above.
        if (rgName2 != null) {
            try {
                resourceManager.resourceGroups().beginDeleteByName(rgName2);
            } catch (ManagementException ignored) {
                // the group was never created by this test run
            }
        }
    }

    // Fix: the @Test annotation was duplicated ("@Test @Test"); @Test is not a repeatable
    // annotation, so the duplicate would not compile.
    @Test
    public void canCreateVirtualMachineWithNetworking() throws Exception {
        // NSG with a single inbound rule allowing TCP port 80 from anywhere.
        NetworkSecurityGroup nsg = this.networkManager.networkSecurityGroups()
            .define("nsg")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .defineRule("rule1")
                .allowInbound()
                .fromAnyAddress()
                .fromPort(80)
                .toAnyAddress()
                .toPort(80)
                .withProtocol(SecurityRuleProtocol.TCP)
                .attach()
            .create();

        // Virtual network whose only subnet is protected by the NSG above.
        Creatable<Network> networkDefinition = this.networkManager.networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/28")
            .defineSubnet("subnet1")
                .withAddressPrefix("10.0.0.0/29")
                .withExistingNetworkSecurityGroup(nsg)
                .attach();

        // VM attached to that network; no public IP.
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork(networkDefinition)
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = 
this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
```suggestion return ORIGINAL_CONTENT.checkMatch(syncClient.downloadContent().toFluxByteBuffer(), span).block().booleanValue(); ```
protected boolean runInternal(Context span) { try { return Boolean.TRUE.equals(ORIGINAL_CONTENT.checkMatch(syncClient.downloadContent().toFluxByteBuffer(), span).block()); } catch (Exception e) { LOGGER.error("Failed to download blob", e); return false; } }
span).block());
protected boolean runInternal(Context span) { try { return ORIGINAL_CONTENT.checkMatch(syncClient.downloadContent(), span).block().booleanValue(); } catch (Exception e) { LOGGER.error("Failed to download blob", e); return false; } }
class DownloadContent extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(DownloadContent.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadContent.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncClient; private final BlobAsyncClient asyncNoFaultClient; public DownloadContent(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return asyncClient.downloadContent().flatMap(response -> ORIGINAL_CONTENT.checkMatch(response.toFluxByteBuffer(), span)); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
class DownloadContent extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(DownloadContent.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadContent.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncClient; private final BlobAsyncClient asyncNoFaultClient; public DownloadContent(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return asyncClient.downloadContent().flatMap(response -> ORIGINAL_CONTENT.checkMatch(response, span)); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
It has been fixed in the new version and `userData` can be updated.
/**
 * Verifies that {@code userData} can be supplied at VM creation time and
 * subsequently changed, and that both values round-trip through the service.
 *
 * <p>Fixes in this revision: the created {@code vm} was assigned but never used,
 * and the update went through the raw inner client
 * ({@code VirtualMachineUpdateInner}) instead of the fluent
 * {@code vm.update()...apply()} path used elsewhere in this test class.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Base64-encoded user-data payloads: one for creation, one for the update.
    String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    VirtualMachine vm = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();

    // userData is only returned when the GET expands it via InstanceViewTypes.USER_DATA.
    Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForCreate, response.getValue().userData());

    // Update through the fluent model (exercises the high-level API and uses 'vm').
    vm.update().withUserData(userDataForUpdate).apply();

    response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForUpdate, response.getValue().userData());
}
computeManager.serviceClient().getVirtualMachines()
/**
 * Verifies that {@code userData} can be supplied at VM creation time and
 * subsequently changed via the fluent {@code vm.update()...apply()} path,
 * with both values round-tripping through the service.
 */
public void canCreateAndUpdateVirtualMachineWithUserData() {
    // Base64-encoded user-data payloads: one for creation, one for the update.
    String userDataForCreate = "N0ZBN0MxRkYtMkNCMC00RUM3LUE1RDctMDY2MUI0RTdDNzY4";
    String userDataForUpdate = "Njc5MDI3MUItQ0RGRC00RjdELUI5NTEtMTA4QjA2RTNGNDRE";

    VirtualMachine vm = computeManager
        .virtualMachines()
        .define(vmName)
        .withRegion(region)
        .withNewResourceGroup(rgName)
        .withNewPrimaryNetwork("10.0.0.0/28")
        .withPrimaryPrivateIPAddressDynamic()
        .withoutPrimaryPublicIPAddress()
        .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_GEN2)
        .withAdminUsername("Foo12")
        .withAdminPassword(password())
        .withNewDataDisk(127)
        .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
        .withUserData(userDataForCreate)
        .create();

    // userData is only returned when the GET expands it via InstanceViewTypes.USER_DATA.
    Response<VirtualMachineInner> response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForCreate, response.getValue().userData());

    // Change userData through the fluent update model.
    vm.update().withUserData(userDataForUpdate).apply();

    // Re-fetch with the USER_DATA expansion and confirm the new value took effect.
    response = computeManager.serviceClient().getVirtualMachines()
        .getByResourceGroupWithResponse(rgName, vmName, InstanceViewTypes.USER_DATA, Context.NONE);
    Assertions.assertEquals(userDataForUpdate, response.getValue().userData());
}
class VirtualMachineOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = ""; private final Region region = Region.US_EAST; private final Region regionProxPlacementGroup = Region.US_WEST; private final Region regionProxPlacementGroup2 = Region.US_EAST; private final String vmName = "javavm"; private final String proxGroupName = "testproxgroup1"; private final String proxGroupName2 = "testproxgroup2"; private final String availabilitySetName = "availset1"; private final String availabilitySetName2 = "availset2"; private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); rgName2 = generateRandomResourceName("javacsmrg2", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { if (rgName != null) { resourceManager.resourceGroups().beginDeleteByName(rgName); } } @Test @Test public void canCreateVirtualMachineWithNetworking() throws Exception { NetworkSecurityGroup nsg = this .networkManager .networkSecurityGroups() .define("nsg") .withRegion(region) .withNewResourceGroup(rgName) .defineRule("rule1") .allowInbound() .fromAnyAddress() .fromPort(80) .toAnyAddress() .toPort(80) .withProtocol(SecurityRuleProtocol.TCP) .attach() .create(); Creatable<Network> networkDefinition = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .defineSubnet("subnet1") .withAddressPrefix("10.0.0.0/29") .withExistingNetworkSecurityGroup(nsg) .attach(); VirtualMachine vm = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork(networkDefinition) .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .create(); NetworkInterface primaryNic = vm.getPrimaryNetworkInterface(); Assertions.assertNotNull(primaryNic); NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration(); Assertions.assertNotNull(primaryIpConfig); Assertions.assertNotNull(primaryIpConfig.networkId()); Network network = primaryIpConfig.getNetwork(); Assertions.assertNotNull(primaryIpConfig.subnetName()); Subnet subnet = network.subnets().get(primaryIpConfig.subnetName()); Assertions.assertNotNull(subnet); nsg = subnet.getNetworkSecurityGroup(); Assertions.assertNotNull(nsg); Assertions.assertEquals("nsg", nsg.name()); Assertions.assertEquals(1, nsg.securityRules().size()); nsg = primaryIpConfig.getNetworkSecurityGroup(); Assertions.assertEquals("nsg", nsg.name()); } @Test public void canCreateVirtualMachine() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); 
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks);
        Assertions.assertEquals(2, unmanagedDataDisks.size());
    }

    // Verifies that tags can be added one at a time (withTag) and replaced wholesale (withTags)
    // through the VM update flow.
    @Test
    public void canUpdateTagsOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .create();

        // add a single tag
        virtualMachine.update().withTag("test", "testValue").apply();
        Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test"));

        // replace the full tag map
        Map<String, String> testTags = new HashMap<String, String>();
        testTags.put("testTag", "testValue");
        virtualMachine.update().withTags(testTags).apply();
        Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag"));
    }

    // Runs a shell script (git install) on a Linux VM via the RunCommand API and checks a
    // non-empty result comes back.
    @Test
    public void canRunScriptOnVM() {
        VirtualMachine virtualMachine =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.0.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("firstuser")
                .withSsh(sshPublicKey())
                .create();

        List<String> installGit = new ArrayList<>();
        installGit.add("sudo apt-get update");
        installGit.add("sudo apt-get install -y git");

        RunCommandResult runResult =
            virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>());
        Assertions.assertNotNull(runResult);
        Assertions.assertNotNull(runResult.value());
        Assertions.assertTrue(runResult.value().size() > 0);
    }

    // Creates a Spot VM with DEALLOCATE eviction policy, simulates an eviction, polls until the
    // VM deallocates, then checks the OS disk moved to RESERVED state.
    // Skipped in playback because the polling loop sleeps in real time.
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canPerformSimulateEvictionOnSpotVirtualMachine() {
        VirtualMachine virtualMachine = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("firstuser")
            .withSsh(sshPublicKey())
            .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE)
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .create();

        // before eviction: OS disk is attached and has a storage account type / size
        Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertTrue(virtualMachine.osDiskSize() > 0);
        Disk disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertNotNull(disk);
        Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState());

        virtualMachine.simulateEviction();

        // poll up to 30 minutes, in 5-minute steps, for the eviction to deallocate the VM
        boolean deallocated = false;
        int pollIntervalInMinutes = 5;
        for (int i = 0; i < 30; i += pollIntervalInMinutes) {
            ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes));
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
            if (virtualMachine.powerState() == PowerState.DEALLOCATED) {
                deallocated = true;
                break;
            }
        }
        Assertions.assertTrue(deallocated);

        // after eviction: VM resource still exists but OS disk info is cleared and disk is RESERVED
        virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertNull(virtualMachine.osDiskStorageAccountType());
        Assertions.assertEquals(0, virtualMachine.osDiskSize());
        disk = computeManager.disks().getById(virtualMachine.osDiskId());
        Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState());
    }

    // Force-deletes a VM (deleteById with forceDeletion=true) and verifies the VM is gone (404)
    // while its network interface survives.
    @Test
    public void canForceDeleteVirtualMachine() {
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .create();
        VirtualMachine virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(virtualMachine);
        Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region());
        String nicId = virtualMachine.primaryNetworkInterfaceId();

        // force delete
        computeManager.virtualMachines().deleteById(virtualMachine.id(), true);

        try {
            virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id());
        } catch (ManagementException ex) {
            virtualMachine = null;
            Assertions.assertEquals(404, ex.getResponse().getStatusCode());
        }
        Assertions.assertNull(virtualMachine);

        // the NIC is not removed by force-deleting the VM
        NetworkInterface nic = networkManager.networkInterfaces().getById(nicId);
        Assertions.assertNotNull(nic);
    }

    // Exercises DeleteOptions (DELETE vs default DETACH) on OS disk, data disks, NIC and secondary
    // NIC at create time, counting which resources remain in the group after each VM is deleted.
    @Test
    public void canCreateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;  // shadows the class-level region for this test
        final String publicIpDnsLabel = generateRandomResourceName("pip", 20);

        Network network = this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();

        // vm1: everything (OS disk, both data disks, primary NIC) set to DELETE
        VirtualMachine vm1 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions());

        Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id());

        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // only the network and the public IP (which was not created with DELETE) should remain
        Assertions.assertEquals(2,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

        PublicIpAddress publicIpAddress =
            computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id());

        // vm2: OS disk, primary NIC and secondary NIC all DELETE
        String secondaryNicName = generateRandomResourceName("nic", 10);
        Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();

        VirtualMachine vm2 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // only the network should remain
        Assertions.assertEquals(1,
            computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count());

        // vm3: no DeleteOptions specified, so everything defaults to DETACH and survives deletion
        secondaryNetworkInterfaceCreatable = this
            .networkManager
            .networkInterfaces()
            .define(secondaryNicName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic();

        VirtualMachine vm3 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withNewDataDisk(computeManager.disks()
                .define("datadisk2")
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withData()
                .withSizeInGB(10))
            .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions());

        computeManager.virtualMachines().deleteById(vm3.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // detached resources remain: 3 disks (OS + 2 data) and 2 NICs
        Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Assertions.assertEquals(2,
            computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count());
    }

    // Verifies DeleteOptions behavior for data disks added during VM *update*: the default delete
    // option set at create time does not apply to disks added later unless re-specified.
    @Test
    public void canUpdateVirtualMachineWithDeleteOption() throws Exception {
        Region region = Region.US_WEST3;  // shadows the class-level region for this test

        Network network = this
            .networkManager
            .networks()
            .define("network1")
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("default", "10.0.0.0/24")
            .create();

        VirtualMachine vm1 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        // disk added during update does NOT inherit the create-time default DELETE option
        vm1.update()
            .withNewDataDisk(10)
            .apply();

        computeManager.virtualMachines().deleteById(vm1.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // only the update-time disk survives
        Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count());
        Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get();
        computeManager.disks().deleteById(disk.id());

        VirtualMachine vm2 = computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("default")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("testuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        // update-time default applies only to disks added in that same update
        vm2.update()
            .withNewDataDisk(10)
            .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE)
            .apply();

        computeManager.virtualMachines().deleteById(vm2.id());
        ResourceManagerUtils.sleep(Duration.ofSeconds(10));
        // create-time disk (no delete option) + OS disk remain
        Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count());
    }

    // Creates a hibernation-enabled Windows VM, hibernate-deallocates it, checks the instance view
    // reports HibernationState/Hibernated, then disables hibernation via update.
    @Test
    public void canHibernateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion("eastus2euap")
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3)
            .enableHibernation()
            .create();
        Assertions.assertTrue(vm.isHibernationEnabled());

        // deallocate(true) requests hibernation rather than a plain deallocate
        vm.deallocate(true);

        InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream()
            .filter(status -> "HibernationState/Hibernated".equals(status.code()))
            .findFirst().orElse(null);
        Assertions.assertNotNull(hibernationStatus);

        vm.start();
        // hibernation can only be toggled while the VM is deallocated
        vm.deallocate();

        vm.update()
            .disableHibernation()
            .apply();
        Assertions.assertFalse(vm.isHibernationEnabled());
    }

    // Walks a VM through the power-state lifecycle: redeploy, powerOff, start, restart, deallocate,
    // asserting the expected PowerState after each transition.
    @Test
    public void canOperateVirtualMachine() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2)
            .create();

        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.redeploy();

        vm.powerOff(true);
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.STOPPED, vm.powerState());

        vm.start();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.restart();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.RUNNING, vm.powerState());

        vm.deallocate();
        vm.refreshInstanceView();
        Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState());
    }

    // Creates a VM with an ephemeral OS disk (cache-disk placement), checks its properties, updates
    // data disks, and verifies the OS disk id is stable across powerOff/start while deallocate fails
    // (ephemeral OS disk VMs cannot be deallocated).
    @Test
    public void canCreateVirtualMachineWithEphemeralOSDisk() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
            .withEphemeralOSDisk()
            .withPlacement(DiffDiskPlacement.CACHE_DISK)
            .withNewDataDisk(1, 1, CachingTypes.READ_WRITE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .create();

        Assertions.assertNull(vm.osDiskDiskEncryptionSetId());
        Assertions.assertTrue(vm.osDiskSize() > 0);
        // ephemeral OS disks are implicitly DELETE + READ_ONLY caching
        Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE);
        Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY);
        Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks()));
        Assertions.assertTrue(vm.isOSDiskEphemeral());
        Assertions.assertNotNull(vm.osDiskId());
        String osDiskId = vm.osDiskId();

        vm.update()
            .withoutDataDisk(1)
            .withNewDataDisk(1, 2, CachingTypes.NONE)
            .withNewDataDisk(1)
            .apply();
        Assertions.assertEquals(vm.dataDisks().size(), 2);

        // OS disk id survives a powerOff/start cycle
        vm.powerOff();
        vm.start();
        vm.refresh();
        Assertions.assertEquals(osDiskId, vm.osDiskId());

        // deallocation is not supported for ephemeral OS disk VMs
        Assertions.assertThrows(Exception.class, vm::deallocate);
    }

    // Attaches a regular VM to an existing flexible-orchestration VMSS and verifies capacity
    // tracking; also verifies that unmanaged-disk VMs and uniform-orchestration VMSS both reject
    // the attachment with ApiErrorException.
    @Test
    public void canCreateVirtualMachineWithExistingScaleSet() throws Exception {
        final String vmssName = generateRandomResourceName("vmss", 10);

        Network network = this
            .networkManager
            .networks()
            .define("vmssvnet")
            .withRegion(region.name())
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/28")
            .withSubnet("subnet1", "10.0.0.0/28")
            .create();

        ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName);

        LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

        VirtualMachineScaleSet flexibleVMSS = this.computeManager
            .virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2)
            .withExistingPrimaryNetworkSubnet(network, "subnet1")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withCapacity(1)
            .withUpgradeMode(UpgradeMode.AUTOMATIC)
            .create();

        String regularVMName = generateRandomResourceName("vm", 10);
        final String pipDnsLabel = generateRandomResourceName("pip", 10);
        VirtualMachine regularVM = this.computeManager
            .virtualMachines()
            .define(regularVMName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(pipDnsLabel)
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
            .withRootUsername("jvuser2")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(flexibleVMSS)
            .create();

        flexibleVMSS.refresh();

        // attaching the VM raises the scale set capacity to 2
        Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId());
        Assertions.assertEquals(2, flexibleVMSS.capacity());

        regularVM.deallocate();
        Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED);

        this.computeManager
            .virtualMachines().deleteById(regularVM.id());

        flexibleVMSS.refresh();
        Assertions.assertEquals(flexibleVMSS.capacity(), 1);

        // unmanaged-disk VMs cannot join a flexible VMSS
        final String storageAccountName = generateRandomResourceName("stg", 17);
        Assertions.assertThrows(
            ApiErrorException.class,
            () -> computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.1.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS")
                .withRootUsername("jvuser3")
                .withSsh(sshPublicKey())
                .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */
                .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
                .withNewStorageAccount(storageAccountName)
                .withOSDiskCaching(CachingTypes.READ_WRITE)
                .withExistingVirtualMachineScaleSet(flexibleVMSS)
                .create()
        );

        final String vmssName2 = generateRandomResourceName("vmss", 10);

        Network network2 = this
            .networkManager
            .networks()
            .define("vmssvnet2")
            .withRegion(region.name())
            .withExistingResourceGroup(rgName)
            .withAddressSpace("192.168.0.0/28")
            .withSubnet("subnet2", "192.168.0.0/28")
            .create();

        LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

        VirtualMachineScaleSet uniformVMSS = this.computeManager
            .virtualMachineScaleSets()
            .define(vmssName2)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
            .withExistingPrimaryNetworkSubnet(network2, "subnet2")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser4")
            .withSsh(sshPublicKey())
            .withCapacity(1)
            .create();

        Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null));

        // a standalone VM cannot join a uniform-orchestration VMSS
        String regularVMName2 = generateRandomResourceName("vm", 10);
        Assertions.assertThrows(
            ApiErrorException.class,
            () -> this.computeManager
                .virtualMachines()
                .define(regularVMName2)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork("10.0.1.0/28")
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS)
                .withRootUsername("jvuser5")
                .withSsh(sshPublicKey())
                .withExistingVirtualMachineScaleSet(uniformVMSS)
                .create()
        );
    }

    // Swaps a VM's OS disk for another managed disk (customer-key encrypted via a disk encryption
    // set) while deallocated, then swaps the original platform-key disk back, checking the active
    // encryption set each time. Skipped in playback (uses Key Vault + service principal from file).
    @Test
    @DoNotRecord(skipInPlayback = true)
    public void canSwapOSDiskWithManagedDisk() {
        String storageAccountName = generateRandomResourceName("sa", 15);
        StorageAccount storageAccount = this.storageManager
            .storageAccounts()
            .define(storageAccountName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .create();

        String vm1Name = generateRandomResourceName("vm", 15);
        VirtualMachine vm1 = this.computeManager
            .virtualMachines()
            .define(vm1Name)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10, 1, CachingTypes.READ_WRITE)
            .withOSDiskDeleteOptions(DeleteOptions.DETACH)
            .withExistingStorageAccount(storageAccount)
            .create();

        Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId());
        Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type());

        // build a disk encryption set backed by a new Key Vault key
        String vaultName = generateRandomResourceName("vault", 15);
        Vault vault = this.keyVaultManager
            .vaults()
            .define(vaultName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .defineAccessPolicy()
            .forServicePrincipal(clientIdFromFile())
            .allowKeyPermissions(KeyPermissions.CREATE)
            .attach()
            .create();

        String keyName = generateRandomResourceName("key", 15);
        Key key = vault.keys()
            .define(keyName)
            .withKeyTypeToCreate(KeyType.RSA)
            .withKeySize(4096)
            .create();

        String desName = generateRandomResourceName("des", 15);
        DiskEncryptionSet des = this.computeManager.diskEncryptionSets()
            .define(desName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY)
            .withExistingKeyVault(vault.id())
            .withExistingKey(key.id())
            .withSystemAssignedManagedServiceIdentity()
            .create();

        // grant the DES identity key access and enable purge protection (required for DES)
        vault.update()
            .defineAccessPolicy()
            .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId())
            .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY)
            .attach()
            .withPurgeProtectionEnabled()
            .apply();

        // vm2 exists only to manufacture a customer-key-encrypted OS disk that survives deletion
        String vm2Name = generateRandomResourceName("vm", 15);
        VirtualMachine vm2 = this.computeManager.virtualMachines()
            .define(vm2Name)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withOSDiskDiskEncryptionSet(des.id())
            .withOSDiskDeleteOptions(DeleteOptions.DETACH)
            .create();
        String vm2OSDiskId = vm2.osDiskId();

        this.computeManager.virtualMachines().deleteById(vm2.id());
        Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId);
        Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type());

        // swap in the customer-key disk (VM must be deallocated for an OS disk swap)
        vm1.deallocate();
        vm1.update()
            .withOSDisk(vm2OSDiskId)
            .apply();
        vm1.start();
        vm1.refresh();
        Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId);
        Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id()));

        // swap the original platform-key disk back
        vm1.deallocate();
        vm1.update()
            .withOSDisk(vm1OSDisk)
            .apply();
        vm1.start();
        vm1.refresh();
        Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id());
        Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet());
    }

    // Creates a Trusted Launch VM with secure boot + vTPM, then disables both via async update and
    // verifies the flags after a restart and re-fetch.
    @Test
    public void canCRUDTrustedLaunchVM() {
        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(Region.US_WEST3)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withTrustedLaunch()
            .withSecureBoot()
            .withVTpm()
            .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .create();

        Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
        Assertions.assertTrue(vm.isSecureBootEnabled());
        Assertions.assertTrue(vm.isVTpmEnabled());

        vm.update()
            .withoutSecureBoot()
            .withoutVTpm()
            .applyAsync()
            .flatMap(VirtualMachine::restartAsync)
            .block();

        // give the restart time to take effect before re-reading state
        ResourceManagerUtils.sleep(Duration.ofMinutes(1));

        vm = computeManager.virtualMachines().getById(vm.id());
        Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType());
        Assertions.assertFalse(vm.isSecureBootEnabled());
        Assertions.assertFalse(vm.isVTpmEnabled());

        computeManager.virtualMachines().deleteById(vm.id());
    }

    // Verifies DeleteOptions can be changed after creation, per resource class (OS disk, primary
    // NIC, secondary NICs, data disks), including bulk updates by disk LUN / NIC id.
    @Test
    public void canUpdateDeleteOptions() {
        String networkName = generateRandomResourceName("network", 15);
        String nicName = generateRandomResourceName("nic", 15);
        String nicName2 = generateRandomResourceName("nic", 15);

        Network network = this
            .networkManager
            .networks()
            .define(networkName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withAddressSpace("10.0.1.0/24")
            .withSubnet("subnet1", "10.0.1.0/28")
            .withSubnet("subnet2", "10.0.1.16/28")
            .create();

        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet1")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2)
            .withRootUsername("Foo12")
            .withSsh(sshPublicKey())
            .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE))
            .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2)
            .withNewSecondaryNetworkInterface(this
                .networkManager
                .networkInterfaces()
                .define(nicName)
                .withRegion(region)
                .withExistingResourceGroup(rgName)
                .withExistingPrimaryNetwork(network)
                .withSubnet("subnet1")
                .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withOSDiskDeleteOptions(DeleteOptions.DELETE)
            .create();

        Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
            DeleteOptions.DELETE.equals(disk.deleteOptions())));

        // flip OS disk, primary NIC and data disk at LUN 1 to DETACH; secondary NIC stays DELETE
        vm.update()
            .withOsDiskDeleteOptions(DeleteOptions.DETACH)
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH)
            .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1)
            .apply();

        Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions());
        Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.networkInterfaceIds().stream()
            .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId ->
                DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
            DeleteOptions.DETACH.equals(disk.deleteOptions())));

        NetworkInterface secondaryNic2 = this
            .networkManager
            .networkInterfaces()
            .define(nicName2)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withExistingPrimaryNetwork(network)
            .withSubnet("subnet2")
            .withPrimaryPrivateIPAddressDynamic()
            .create();

        vm.powerOff();
        vm.deallocate();

        vm.update()
            .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH))
            .withExistingSecondaryNetworkInterface(secondaryNic2)
            .apply();

        // bulk-set everything back to DELETE, addressing data disks by LUN and NICs by id
        vm.update()
            .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE)
            .withDataDisksDeleteOptions(DeleteOptions.DELETE,
                new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0]))
            .withNetworkInterfacesDeleteOptions(
                DeleteOptions.DELETE,
                vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new))
            .apply();

        Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions());
        Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId ->
            DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId))));
        Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk ->
            DeleteOptions.DELETE.equals(disk.deleteOptions())));
    }

    // Verifies listByVirtualMachineScaleSetId returns exactly the VMs attached to the given
    // flexible VMSS, and not unattached VMs in the same resource group.
    @Test
    public void testListVmByVmssId() {
        String vmssName = generateRandomResourceName("vmss", 15);
        String vmName = generateRandomResourceName("vm", 15);    // shadows the class-level vmName
        String vmName2 = generateRandomResourceName("vm", 15);
        VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .create();

        Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());

        VirtualMachine vm = computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withExistingVirtualMachineScaleSet(vmss)
            .create();
        Assertions.assertNotNull(vm.virtualMachineScaleSetId());

        // vm2 is in the same group but NOT attached to the scale set
        VirtualMachine vm2 = computeManager.virtualMachines()
            .define(vmName2)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.16/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .create();
        Assertions.assertNull(vm2.virtualMachineScaleSetId());

        Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count());
        Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id()));
        Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count());
    }

    // Paging test for listByVirtualMachineScaleSetId: creates a 70-instance flexible VMSS and
    // verifies the nextLink-driven iteration returns every instance across multiple pages.
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. Backend pageSize may change, so we don't want to assert that.")
    public void testListByVmssIdNextLink() throws Exception {
        String vmssName = generateRandomResourceName("vmss", 15);
        String vnetName = generateRandomResourceName("vnet", 15);
        String vmName = generateRandomResourceName("vm", 15);    // shadows the class-level vmName
        int vmssCapacity = 70;

        // an unrelated standalone VM that must NOT appear in the scale-set listing
        computeManager.virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.1.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .create();

        Network network = networkManager.networks().define(vnetName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withAddressSpace("10.0.0.0/24")
            .withSubnet("subnet1", "10.0.0.0/24")
            .create();

        LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1",
            LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true);

        VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets()
            .define(vmssName)
            .withRegion(region)
            .withExistingResourceGroup(rgName)
            .withFlexibleOrchestrationMode()
            .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0)
            .withExistingPrimaryNetworkSubnet(network, "subnet1")
            .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer)
            .withoutPrimaryInternalLoadBalancer()
            .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
            .withRootUsername("jvuser")
            .withSsh(sshPublicKey())
            .withCapacity(vmssCapacity)
            .create();

        PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id());
        Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage();
        int pageCount = 0;
        for (PagedResponse<VirtualMachine> response : vmIterable) {
            pageCount++;
            Assertions.assertEquals(200, response.getStatusCode());
        }
        Assertions.assertEquals(vmssCapacity, vmPaged.stream().count());
        Assertions.assertEquals(2, pageCount);
    }

    /**
     * Builds {@code vmCount} VM creatables (each with its own network and public IP creatable,
     * all sharing one resource group and one storage account, using unmanaged disks) and returns
     * them together with the keys of the nested network/public-IP creatables so callers can look
     * the nested resources up in the created-resources map after batch creation.
     */
    private CreatablesInfo prepareCreatableVirtualMachines(
        Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) {
        Creatable<ResourceGroup> resourceGroupCreatable =
            resourceManager.resourceGroups().define(rgName).withRegion(region);

        Creatable<StorageAccount> storageAccountCreatable =
            storageManager
                .storageAccounts()
                .define(generateRandomResourceName("stg", 20))
                .withRegion(region)
                .withNewResourceGroup(resourceGroupCreatable);

        List<String> networkCreatableKeys = new ArrayList<>();
        List<String> publicIpCreatableKeys = new ArrayList<>();
        List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>();
        for (int i = 0; i < vmCount; i++) {
            Creatable<Network> networkCreatable =
                networkManager
                    .networks()
                    .define(String.format("%s-%d", networkNamePrefix, i))
                    .withRegion(region)
                    .withNewResourceGroup(resourceGroupCreatable)
                    .withAddressSpace("10.0.0.0/28");
            networkCreatableKeys.add(networkCreatable.key());

            Creatable<PublicIpAddress> publicIPAddressCreatable =
                networkManager
                    .publicIpAddresses()
                    .define(String.format("%s-%d", publicIpNamePrefix, i))
                    .withRegion(region)
                    .withNewResourceGroup(resourceGroupCreatable);
            publicIpCreatableKeys.add(publicIPAddressCreatable.key());

            Creatable<VirtualMachine> virtualMachineCreatable =
                computeManager
                    .virtualMachines()
                    .define(String.format("%s-%d", vmNamePrefix, i))
                    .withRegion(region)
                    .withNewResourceGroup(resourceGroupCreatable)
                    .withNewPrimaryNetwork(networkCreatable)
                    .withPrimaryPrivateIPAddressDynamic()
                    .withNewPrimaryPublicIPAddress(publicIPAddressCreatable)
                    .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                    .withRootUsername("tirekicker")
                    .withSsh(sshPublicKey())
                    .withUnmanagedDisks()
                    .withNewStorageAccount(storageAccountCreatable);

            virtualMachineCreatables.add(virtualMachineCreatable);
        }
        CreatablesInfo creatablesInfo = new CreatablesInfo();
        creatablesInfo.virtualMachineCreatables = virtualMachineCreatables;
        creatablesInfo.networkCreatableKeys = networkCreatableKeys;
        creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys;
        return creatablesInfo;
    }

    // Simple holder returned by prepareCreatableVirtualMachines: the VM creatables plus the keys
    // of the nested network/public-IP creatables.
    class CreatablesInfo {
        private List<Creatable<VirtualMachine>> virtualMachineCreatables;
        List<String> networkCreatableKeys;
        List<String> publicIpCreatableKeys;
    }
}
// Live/playback tests for Azure virtual machine CRUD operations via the
// compute management fluent API.
class VirtualMachineOperationsTests extends ComputeManagementTest {
    // Resource-group names are regenerated per run in initializeClients;
    // rgName is nulled out by tests that already deleted their group.
    private String rgName = "";
    private String rgName2 = "";
    private final Region region = Region.US_EAST;
    private final Region regionProxPlacementGroup = Region.US_WEST;
    private final Region regionProxPlacementGroup2 = Region.US_EAST;
    private final String vmName = "javavm";
    private final String proxGroupName = "testproxgroup1";
    private final String proxGroupName2 = "testproxgroup2";
    private final String availabilitySetName = "availset1";
    private final String availabilitySetName2 = "availset2";
    private final ProximityPlacementGroupType proxGroupType = ProximityPlacementGroupType.STANDARD;

    @Override
    protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) {
        rgName = generateRandomResourceName("javacsmrg", 15);
        rgName2 = generateRandomResourceName("javacsmrg2", 15);
        super.initializeClients(httpPipeline, profile);
    }

    @Override
    protected void cleanUpResources() {
        if (rgName != null) {
            resourceManager.resourceGroups().beginDeleteByName(rgName);
        }
        // NOTE(review): rgName2 (created by the proximity-placement-group
        // test) is never deleted here; unconditionally deleting it could fail
        // for tests that never create it — confirm intended cleanup.
    }

    // FIX: the annotation was duplicated ("@Test @Test"); JUnit 5's @Test is
    // not @Repeatable, so the duplicate is a compile error and was removed.
    @Test
    public void canCreateVirtualMachineWithNetworking() throws Exception {
        // NSG with a single inbound port-80 TCP rule.
        NetworkSecurityGroup nsg =
            this
                .networkManager
                .networkSecurityGroups()
                .define("nsg")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .defineRule("rule1")
                .allowInbound()
                .fromAnyAddress()
                .fromPort(80)
                .toAnyAddress()
                .toPort(80)
                .withProtocol(SecurityRuleProtocol.TCP)
                .attach()
                .create();

        // Network whose subnet is associated with the NSG above.
        Creatable<Network> networkDefinition =
            this
                .networkManager
                .networks()
                .define("network1")
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withAddressSpace("10.0.0.0/28")
                .defineSubnet("subnet1")
                .withAddressPrefix("10.0.0.0/29")
                .withExistingNetworkSecurityGroup(nsg)
                .attach();

        // Create a Linux VM attached to that network, no public IP.
        VirtualMachine vm =
            computeManager
                .virtualMachines()
                .define(vmName)
                .withRegion(region)
                .withNewResourceGroup(rgName)
                .withNewPrimaryNetwork(networkDefinition)
                .withPrimaryPrivateIPAddressDynamic()
                .withoutPrimaryPublicIPAddress()
                .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS)
                .withRootUsername("Foo12")
                .withSsh(sshPublicKey())
                .create();

        NetworkInterface primaryNic = vm.getPrimaryNetworkInterface();
        Assertions.assertNotNull(primaryNic);
        NicIpConfiguration primaryIpConfig = primaryNic.primaryIPConfiguration();
        Assertions.assertNotNull(primaryIpConfig);

        // The NIC must be wired to the expected network/subnet, and the NSG
        // must be reachable both through the subnet and through the IP config.
        Assertions.assertNotNull(primaryIpConfig.networkId());
        Network network = primaryIpConfig.getNetwork();
        Assertions.assertNotNull(primaryIpConfig.subnetName());
        Subnet subnet = network.subnets().get(primaryIpConfig.subnetName());
        Assertions.assertNotNull(subnet);
        nsg = subnet.getNetworkSecurityGroup();
        Assertions.assertNotNull(nsg);
        Assertions.assertEquals("nsg", nsg.name());
        Assertions.assertEquals(1, nsg.securityRules().size());
        nsg = primaryIpConfig.getNetworkSecurityGroup();
        Assertions.assertEquals("nsg", nsg.name());
    }

    @Test
    public void canCreateVirtualMachine() throws Exception {
        // Create a Windows VM with unmanaged disks and a license type.
        computeManager
            .virtualMachines()
            .define(vmName)
            .withRegion(region)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER)
            .withAdminUsername("Foo12")
            .withAdminPassword(password())
            .withUnmanagedDisks()
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .withOSDiskName("javatest")
            .withLicenseType("Windows_Server")
            .create();

        // The VM should be discoverable both by listing the resource group
        // and by a direct get.
        VirtualMachine foundVM = null;
        PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName);
        for (VirtualMachine vm1 : vms) {
            if (vm1.name().equals(vmName)) {
                foundVM = vm1;
                break;
            }
        }
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
        foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName);
        Assertions.assertNotNull(foundVM);
        Assertions.assertEquals(region, foundVM.region());
Assertions.assertEquals("Windows_Server", foundVM.licenseType()); Assertions.assertNotNull(foundVM.timeCreated()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotCreateVirtualMachineSyncPoll() throws Exception { final String mySqlInstallScript = "https: final String installCommand = "bash install_mysql_server_5.6.sh Abc.123x("; Assertions.assertThrows(IllegalStateException.class, () -> { Accepted<VirtualMachine> acceptedVirtualMachine = this.computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .defineNewExtension("CustomScriptForLinux") .withPublisher("Microsoft.OSTCExtensions") .withType("CustomScriptForLinux") .withVersion("1.4") .withMinorVersionAutoUpgrade() .withPublicSetting("fileUris", Collections.singletonList(mySqlInstallScript)) .withPublicSetting("commandToExecute", installCommand) .attach() .beginCreate(); }); boolean dependentResourceCreated = computeManager.resourceManager().serviceClient().getResourceGroups().checkExistence(rgName); Assertions.assertFalse(dependentResourceCreated); rgName = null; } @Test public void canCreateVirtualMachineSyncPoll() throws Exception { final long defaultDelayInMillis = 10 * 1000; Accepted<VirtualMachine> acceptedVirtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .beginCreate(); VirtualMachine createdVirtualMachine = acceptedVirtualMachine.getActivationResponse().getValue(); Assertions.assertNotEquals("Succeeded", createdVirtualMachine.provisioningState()); LongRunningOperationStatus pollStatus = acceptedVirtualMachine.getActivationResponse().getStatus(); long delayInMills = acceptedVirtualMachine.getActivationResponse().getRetryAfter() == null ? defaultDelayInMillis : acceptedVirtualMachine.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedVirtualMachine.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : pollResponse.getRetryAfter().toMillis(); } Assertions.assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollStatus); VirtualMachine virtualMachine = acceptedVirtualMachine.getFinalResult(); Assertions.assertEquals("Succeeded", virtualMachine.provisioningState()); Accepted<Void> acceptedDelete = computeManager.virtualMachines() .beginDeleteByResourceGroup(virtualMachine.resourceGroupName(), virtualMachine.name()); pollStatus = acceptedDelete.getActivationResponse().getStatus(); delayInMills = acceptedDelete.getActivationResponse().getRetryAfter() == null ? 
defaultDelayInMillis : (int) acceptedDelete.getActivationResponse().getRetryAfter().toMillis(); while (!pollStatus.isComplete()) { ResourceManagerUtils.sleep(Duration.ofMillis(delayInMills)); PollResponse<?> pollResponse = acceptedDelete.getSyncPoller().poll(); pollStatus = pollResponse.getStatus(); delayInMills = pollResponse.getRetryAfter() == null ? defaultDelayInMillis : (int) pollResponse.getRetryAfter().toMillis(); } boolean deleted = false; try { computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException e) { if (e.getResponse().getStatusCode() == 404 && ("NotFound".equals(e.getValue().getCode()) || "ResourceNotFound".equals(e.getValue().getCode()))) { deleted = true; } } Assertions.assertTrue(deleted); } @Test public void canCreateUpdatePriorityAndPrice() throws Exception { computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2016_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLowPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withMaxPrice(1000.0) .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(region, foundVM.region()); Assertions.assertEquals("Windows_Server", 
foundVM.licenseType()); Assertions.assertEquals((Double) 1000.0, foundVM.billingProfile().maxPrice()); Assertions.assertEquals(VirtualMachineEvictionPolicyTypes.DEALLOCATE, foundVM.evictionPolicy()); try { foundVM.update().withMaxPrice(1500.0).apply(); Assertions.assertEquals((Double) 1500.0, foundVM.billingProfile().maxPrice()); Assertions.fail(); } catch (ManagementException e) { } foundVM.deallocate(); foundVM.update().withMaxPrice(2000.0).apply(); foundVM.start(); Assertions.assertEquals((Double) 2000.0, foundVM.billingProfile().maxPrice()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.SPOT).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.SPOT, foundVM.priority()); foundVM = foundVM.update().withPriority(VirtualMachinePriorityTypes.LOW).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.LOW, foundVM.priority()); try { foundVM.update().withPriority(VirtualMachinePriorityTypes.REGULAR).apply(); Assertions.assertEquals(VirtualMachinePriorityTypes.REGULAR, foundVM.priority()); Assertions.fail(); } catch (ManagementException e) { } computeManager.virtualMachines().deleteById(foundVM.id()); } @Test public void cannotUpdateProximityPlacementGroupForVirtualMachine() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( 
setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); AvailabilitySet setCreated2 = computeManager .availabilitySets() .define(availabilitySetName2) .withRegion(regionProxPlacementGroup2) .withNewResourceGroup(rgName2) .withNewProximityPlacementGroup(proxGroupName2, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName2, setCreated2.name()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated2.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated2.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated2.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated2.id().equalsIgnoreCase(setCreated2.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated2.regionName(), setCreated2.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if (vm1.name().equals(vmName)) { foundVM = vm1; break; } } 
Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); try { VirtualMachine updatedVm = foundVM.update().withProximityPlacementGroup(setCreated2.proximityPlacementGroup().id()).apply(); } catch (ManagementException clEx) { Assertions .assertTrue( clEx .getMessage() .contains( "Updating proximity placement group of VM javavm is not allowed while the VM is running." 
+ " Please stop/deallocate the VM and retry the operation.")); } computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndAvailabilitySetInSameProximityPlacementGroup() throws Exception { AvailabilitySet setCreated = computeManager .availabilitySets() .define(availabilitySetName) .withRegion(regionProxPlacementGroup) .withNewResourceGroup(rgName) .withNewProximityPlacementGroup(proxGroupName, proxGroupType) .create(); Assertions.assertEquals(availabilitySetName, setCreated.name()); Assertions.assertNotNull(setCreated.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, setCreated.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(setCreated.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(setCreated.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertEquals(setCreated.regionName(), setCreated.proximityPlacementGroup().location()); computeManager .virtualMachines() .define(vmName) .withRegion(regionProxPlacementGroup) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withProximityPlacementGroup(setCreated.proximityPlacementGroup().id()) .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withUnmanagedDisks() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .withOSDiskName("javatest") .withLicenseType("Windows_Server") .create(); VirtualMachine foundVM = null; PagedIterable<VirtualMachine> vms = computeManager.virtualMachines().listByResourceGroup(rgName); for (VirtualMachine vm1 : vms) { if 
(vm1.name().equals(vmName)) { foundVM = vm1; break; } } Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); foundVM = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(foundVM); Assertions.assertEquals(regionProxPlacementGroup, foundVM.region()); Assertions.assertEquals("Windows_Server", foundVM.licenseType()); PowerState powerState = foundVM.powerState(); Assertions.assertEquals(powerState, PowerState.RUNNING); VirtualMachineInstanceView instanceView = foundVM.instanceView(); Assertions.assertNotNull(instanceView); Assertions.assertNotNull(instanceView.statuses().size() > 0); Assertions.assertNotNull(foundVM.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, foundVM.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(foundVM.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(foundVM.proximityPlacementGroup().availabilitySetIds().get(0))); Assertions.assertNotNull(foundVM.proximityPlacementGroup().virtualMachineIds()); Assertions.assertFalse(foundVM.proximityPlacementGroup().virtualMachineIds().isEmpty()); Assertions .assertTrue(foundVM.id().equalsIgnoreCase(setCreated.proximityPlacementGroup().virtualMachineIds().get(0))); VirtualMachine updatedVm = foundVM.update().withoutProximityPlacementGroup().apply(); Assertions.assertNotNull(updatedVm.proximityPlacementGroup()); Assertions.assertEquals(proxGroupType, updatedVm.proximityPlacementGroup().proximityPlacementGroupType()); Assertions.assertNotNull(updatedVm.proximityPlacementGroup().availabilitySetIds()); Assertions.assertFalse(updatedVm.proximityPlacementGroup().availabilitySetIds().isEmpty()); Assertions .assertTrue( setCreated.id().equalsIgnoreCase(updatedVm.proximityPlacementGroup().availabilitySetIds().get(0))); 
computeManager.virtualMachines().deleteById(foundVM.id()); computeManager.availabilitySets().deleteById(setCreated.id()); } @Test public void canCreateVirtualMachinesAndRelatedResourcesInParallel() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; List<String> networkCreatableKeys = creatablesInfo.networkCreatableKeys; List<String> publicIpCreatableKeys = creatablesInfo.publicIpCreatableKeys; CreatedResources<VirtualMachine> createdVirtualMachines = computeManager.virtualMachines().create(virtualMachineCreatables); Assertions.assertTrue(createdVirtualMachines.size() == count); Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } for (VirtualMachine virtualMachine : createdVirtualMachines.values()) { Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } for (String networkCreatableKey : networkCreatableKeys) { Network createdNetwork = (Network) createdVirtualMachines.createdRelatedResource(networkCreatableKey); Assertions.assertNotNull(createdNetwork); Assertions.assertTrue(networkNames.contains(createdNetwork.name())); } Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } for (String publicIpCreatableKey : publicIpCreatableKeys) { PublicIpAddress createdPublicIpAddress = 
(PublicIpAddress) createdVirtualMachines.createdRelatedResource(publicIpCreatableKey); Assertions.assertNotNull(createdPublicIpAddress); Assertions.assertTrue(publicIPAddressNames.contains(createdPublicIpAddress.name())); } } @Test public void canStreamParallelCreatedVirtualMachinesAndRelatedResources() throws Exception { String vmNamePrefix = "vmz"; String publicIpNamePrefix = generateRandomResourceName("pip-", 15); String networkNamePrefix = generateRandomResourceName("vnet-", 15); int count = 5; final Set<String> virtualMachineNames = new HashSet<>(); for (int i = 0; i < count; i++) { virtualMachineNames.add(String.format("%s-%d", vmNamePrefix, i)); } final Set<String> networkNames = new HashSet<>(); for (int i = 0; i < count; i++) { networkNames.add(String.format("%s-%d", networkNamePrefix, i)); } final Set<String> publicIPAddressNames = new HashSet<>(); for (int i = 0; i < count; i++) { publicIPAddressNames.add(String.format("%s-%d", publicIpNamePrefix, i)); } final CreatablesInfo creatablesInfo = prepareCreatableVirtualMachines(region, vmNamePrefix, networkNamePrefix, publicIpNamePrefix, count); final AtomicInteger resourceCount = new AtomicInteger(0); List<Creatable<VirtualMachine>> virtualMachineCreatables = creatablesInfo.virtualMachineCreatables; computeManager .virtualMachines() .createAsync(virtualMachineCreatables) .map( createdResource -> { if (createdResource instanceof Resource) { Resource resource = (Resource) createdResource; System.out.println("Created: " + resource.id()); if (resource instanceof VirtualMachine) { VirtualMachine virtualMachine = (VirtualMachine) resource; Assertions.assertTrue(virtualMachineNames.contains(virtualMachine.name())); Assertions.assertNotNull(virtualMachine.id()); } else if (resource instanceof Network) { Network network = (Network) resource; Assertions.assertTrue(networkNames.contains(network.name())); Assertions.assertNotNull(network.id()); } else if (resource instanceof PublicIpAddress) { PublicIpAddress 
publicIPAddress = (PublicIpAddress) resource; Assertions.assertTrue(publicIPAddressNames.contains(publicIPAddress.name())); Assertions.assertNotNull(publicIPAddress.id()); } } resourceCount.incrementAndGet(); return createdResource; }) .blockLast(); networkNames.forEach(name -> { Assertions.assertNotNull(networkManager.networks().getByResourceGroup(rgName, name)); }); publicIPAddressNames.forEach(name -> { Assertions.assertNotNull(networkManager.publicIpAddresses().getByResourceGroup(rgName, name)); }); Assertions.assertEquals(1, storageManager.storageAccounts().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, networkManager.networkInterfaces().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(count, resourceCount.get()); } @Test public void canSetStorageAccountForUnmanagedDisk() { final String storageName = generateRandomResourceName("st", 14); StorageAccount storageAccount = storageManager .storageAccounts() .define(storageName) .withRegion(region) .withNewResourceGroup(rgName) .withSku(StorageAccountSkuType.PREMIUM_LRS) .create(); VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .defineUnmanagedDataDisk("disk1") .withNewVhd(100) .withLun(2) .storeAt(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .attach() .defineUnmanagedDataDisk("disk2") .withNewVhd(100) .withLun(3) .storeAt(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .attach() .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .withOSDiskCaching(CachingTypes.READ_WRITE) .create(); Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); VirtualMachineUnmanagedDataDisk firstUnmanagedDataDisk = unmanagedDataDisks.get(2); Assertions.assertNotNull(firstUnmanagedDataDisk); VirtualMachineUnmanagedDataDisk secondUnmanagedDataDisk = unmanagedDataDisks.get(3); Assertions.assertNotNull(secondUnmanagedDataDisk); String createdVhdUri1 = firstUnmanagedDataDisk.vhdUri(); String createdVhdUri2 = secondUnmanagedDataDisk.vhdUri(); Assertions.assertNotNull(createdVhdUri1); Assertions.assertNotNull(createdVhdUri2); computeManager.virtualMachines().deleteById(virtualMachine.id()); virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk1vhd.vhd") .withSize(VirtualMachineSizeTypes.fromString("Standard_D2as_v4")) .create(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(1, unmanagedDataDisks.size()); firstUnmanagedDataDisk = null; for (VirtualMachineUnmanagedDataDisk unmanagedDisk : unmanagedDataDisks.values()) { firstUnmanagedDataDisk = unmanagedDisk; break; } Assertions.assertNotNull(firstUnmanagedDataDisk.vhdUri()); Assertions.assertTrue(firstUnmanagedDataDisk.vhdUri().equalsIgnoreCase(createdVhdUri1)); virtualMachine .update() .withExistingUnmanagedDataDisk(storageAccount.name(), "diskvhds", "datadisk2vhd.vhd") .apply(); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); unmanagedDataDisks = virtualMachine.unmanagedDataDisks(); 
Assertions.assertNotNull(unmanagedDataDisks); Assertions.assertEquals(2, unmanagedDataDisks.size()); } @Test public void canUpdateTagsOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); virtualMachine.update().withTag("test", "testValue").apply(); Assertions.assertEquals("testValue", virtualMachine.innerModel().tags().get("test")); Map<String, String> testTags = new HashMap<String, String>(); testTags.put("testTag", "testValue"); virtualMachine.update().withTags(testTags).apply(); Assertions.assertEquals(testTags.get("testTag"), virtualMachine.innerModel().tags().get("testTag")); } @Test public void canRunScriptOnVM() { VirtualMachine virtualMachine = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .create(); List<String> installGit = new ArrayList<>(); installGit.add("sudo apt-get update"); installGit.add("sudo apt-get install -y git"); RunCommandResult runResult = virtualMachine.runShellScript(installGit, new ArrayList<RunCommandInputParameter>()); Assertions.assertNotNull(runResult); Assertions.assertNotNull(runResult.value()); Assertions.assertTrue(runResult.value().size() > 0); } @Test @DoNotRecord(skipInPlayback = true) public void canPerformSimulateEvictionOnSpotVirtualMachine() { VirtualMachine virtualMachine = computeManager.virtualMachines() .define(vmName) 
.withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("firstuser") .withSsh(sshPublicKey()) .withSpotPriority(VirtualMachineEvictionPolicyTypes.DEALLOCATE) .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .create(); Assertions.assertNotNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertTrue(virtualMachine.osDiskSize() > 0); Disk disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertNotNull(disk); Assertions.assertEquals(DiskState.ATTACHED, disk.innerModel().diskState()); virtualMachine.simulateEviction(); boolean deallocated = false; int pollIntervalInMinutes = 5; for (int i = 0; i < 30; i += pollIntervalInMinutes) { ResourceManagerUtils.sleep(Duration.ofMinutes(pollIntervalInMinutes)); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); if (virtualMachine.powerState() == PowerState.DEALLOCATED) { deallocated = true; break; } } Assertions.assertTrue(deallocated); virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); Assertions.assertNotNull(virtualMachine); Assertions.assertNull(virtualMachine.osDiskStorageAccountType()); Assertions.assertEquals(0, virtualMachine.osDiskSize()); disk = computeManager.disks().getById(virtualMachine.osDiskId()); Assertions.assertEquals(DiskState.RESERVED, disk.innerModel().diskState()); } @Test public void canForceDeleteVirtualMachine() { computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2012_R2_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .create(); VirtualMachine 
virtualMachine = computeManager.virtualMachines().getByResourceGroup(rgName, vmName); Assertions.assertNotNull(virtualMachine); Assertions.assertEquals(Region.fromName("eastus2euap"), virtualMachine.region()); String nicId = virtualMachine.primaryNetworkInterfaceId(); computeManager.virtualMachines().deleteById(virtualMachine.id(), true); try { virtualMachine = computeManager.virtualMachines().getById(virtualMachine.id()); } catch (ManagementException ex) { virtualMachine = null; Assertions.assertEquals(404, ex.getResponse().getStatusCode()); } Assertions.assertNull(virtualMachine); NetworkInterface nic = networkManager.networkInterfaces().getById(nicId); Assertions.assertNotNull(nic); } @Test public void canCreateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; final String publicIpDnsLabel = generateRandomResourceName("pip", 20); Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIpDnsLabel/*, DeleteOptions.DELETE*/) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm1.osDiskDeleteOptions()); 
Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm1.dataDisks().get(1).deleteOptions()); Assertions.assertEquals(vm1.id(), computeManager.virtualMachines().getById(vm1.id()).id()); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); PublicIpAddress publicIpAddress = computeManager.networkManager().publicIpAddresses().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.networkManager().publicIpAddresses().deleteById(publicIpAddress.id()); String secondaryNicName = generateRandomResourceName("nic", 10); Creatable<NetworkInterface> secondaryNetworkInterfaceCreatable = this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable, DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.resourceManager().genericResources().listByResourceGroup(rgName).stream().count()); secondaryNetworkInterfaceCreatable = 
this .networkManager .networkInterfaces() .define(secondaryNicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic(); VirtualMachine vm3 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withNewDataDisk(computeManager.disks() .define("datadisk2") .withRegion(region) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10)) .withNewSecondaryNetworkInterface(secondaryNetworkInterfaceCreatable) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(DeleteOptions.DETACH, vm3.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(0).deleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm3.dataDisks().get(1).deleteOptions()); computeManager.virtualMachines().deleteById(vm3.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(3, computeManager.disks().listByResourceGroup(rgName).stream().count()); Assertions.assertEquals(2, computeManager.networkManager().networkInterfaces().listByResourceGroup(rgName).stream().count()); } @Test public void canUpdateVirtualMachineWithDeleteOption() throws Exception { Region region = Region.US_WEST3; Network network = this .networkManager .networks() .define("network1") .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("default", "10.0.0.0/24") .create(); VirtualMachine vm1 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm1.update() .withNewDataDisk(10) .apply(); computeManager.virtualMachines().deleteById(vm1.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(1, computeManager.disks().listByResourceGroup(rgName).stream().count()); Disk disk = computeManager.disks().listByResourceGroup(rgName).stream().findFirst().get(); computeManager.disks().deleteById(disk.id()); VirtualMachine vm2 = computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("default") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("testuser") .withSsh(sshPublicKey()) .withNewDataDisk(10) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); vm2.update() .withNewDataDisk(10) .withDataDiskDefaultDeleteOptions(DeleteOptions.DELETE) .apply(); computeManager.virtualMachines().deleteById(vm2.id()); ResourceManagerUtils.sleep(Duration.ofSeconds(10)); Assertions.assertEquals(2, computeManager.disks().listByResourceGroup(rgName).stream().count()); } @Test public void canHibernateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion("eastus2euap") .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() 
.withPopularWindowsImage(KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER) .withAdminUsername("Foo12") .withAdminPassword(password()) .withSize(VirtualMachineSizeTypes.STANDARD_D2S_V3) .enableHibernation() .create(); Assertions.assertTrue(vm.isHibernationEnabled()); vm.deallocate(true); InstanceViewStatus hibernationStatus = vm.instanceView().statuses().stream() .filter(status -> "HibernationState/Hibernated".equals(status.code())) .findFirst().orElse(null); Assertions.assertNotNull(hibernationStatus); vm.start(); vm.deallocate(); vm.update() .disableHibernation() .apply(); Assertions.assertFalse(vm.isHibernationEnabled()); } @Test public void canOperateVirtualMachine() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_A1_V2) .create(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.redeploy(); vm.powerOff(true); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.STOPPED, vm.powerState()); vm.start(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.restart(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.RUNNING, vm.powerState()); vm.deallocate(); vm.refreshInstanceView(); Assertions.assertEquals(PowerState.DEALLOCATED, vm.powerState()); } @Test public void canCreateVirtualMachineWithEphemeralOSDisk() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) 
.withRootUsername("Foo12") .withSsh(sshPublicKey()) .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withEphemeralOSDisk() .withPlacement(DiffDiskPlacement.CACHE_DISK) .withNewDataDisk(1, 1, CachingTypes.READ_WRITE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertNull(vm.osDiskDiskEncryptionSetId()); Assertions.assertTrue(vm.osDiskSize() > 0); Assertions.assertEquals(vm.osDiskDeleteOptions(), DeleteOptions.DELETE); Assertions.assertEquals(vm.osDiskCachingType(), CachingTypes.READ_ONLY); Assertions.assertFalse(CoreUtils.isNullOrEmpty(vm.dataDisks())); Assertions.assertTrue(vm.isOSDiskEphemeral()); Assertions.assertNotNull(vm.osDiskId()); String osDiskId = vm.osDiskId(); vm.update() .withoutDataDisk(1) .withNewDataDisk(1, 2, CachingTypes.NONE) .withNewDataDisk(1) .apply(); Assertions.assertEquals(vm.dataDisks().size(), 2); vm.powerOff(); vm.start(); vm.refresh(); Assertions.assertEquals(osDiskId, vm.osDiskId()); Assertions.assertThrows(Exception.class, vm::deallocate); } @Test public void canCreateVirtualMachineWithExistingScaleSet() throws Exception { final String vmssName = generateRandomResourceName("vmss", 10); Network network = this .networkManager .networks() .define("vmssvnet") .withRegion(region.name()) .withNewResourceGroup(rgName) .withAddressSpace("10.0.0.0/28") .withSubnet("subnet1", "10.0.0.0/28") .create(); ResourceGroup resourceGroup = this.resourceManager.resourceGroups().getByName(rgName); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, resourceGroup, "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet flexibleVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_DS1_V2) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) 
.withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(1) .withUpgradeMode(UpgradeMode.AUTOMATIC) .create(); String regularVMName = generateRandomResourceName("vm", 10); final String pipDnsLabel = generateRandomResourceName("pip", 10); VirtualMachine regularVM = this.computeManager .virtualMachines() .define(regularVMName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(pipDnsLabel) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser2") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create(); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.id(), regularVM.virtualMachineScaleSetId()); Assertions.assertEquals(2, flexibleVMSS.capacity()); regularVM.deallocate(); Assertions.assertEquals(regularVM.powerState(), PowerState.DEALLOCATED); this.computeManager .virtualMachines().deleteById(regularVM.id()); flexibleVMSS.refresh(); Assertions.assertEquals(flexibleVMSS.capacity(), 1); final String storageAccountName = generateRandomResourceName("stg", 17); Assertions.assertThrows( ApiErrorException.class, () -> computeManager .virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withLatestLinuxImage("Canonical", "UbuntuServer", "14.04.2-LTS") .withRootUsername("jvuser3") .withSsh(sshPublicKey()) .withUnmanagedDisks() /* UN-MANAGED OS and DATA DISKS */ .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4")) .withNewStorageAccount(storageAccountName) .withOSDiskCaching(CachingTypes.READ_WRITE) .withExistingVirtualMachineScaleSet(flexibleVMSS) .create() ); final String vmssName2 = 
generateRandomResourceName("vmss", 10); Network network2 = this .networkManager .networks() .define("vmssvnet2") .withRegion(region.name()) .withExistingResourceGroup(rgName) .withAddressSpace("192.168.0.0/28") .withSubnet("subnet2", "192.168.0.0/28") .create(); LoadBalancer publicLoadBalancer2 = createHttpLoadBalancers(region, resourceGroup, "2", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet uniformVMSS = this.computeManager .virtualMachineScaleSets() .define(vmssName2) .withRegion(region) .withNewResourceGroup(rgName) .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network2, "subnet2") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer2) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser4") .withSsh(sshPublicKey()) .withCapacity(1) .create(); Assertions.assertTrue(uniformVMSS.virtualMachines().list().stream().allMatch(v -> v.instanceId() != null)); String regularVMName2 = generateRandomResourceName("vm", 10); Assertions.assertThrows( ApiErrorException.class, () -> this.computeManager .virtualMachines() .define(regularVMName2) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_18_04_LTS) .withRootUsername("jvuser5") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(uniformVMSS) .create() ); } @Test @DoNotRecord(skipInPlayback = true) public void canSwapOSDiskWithManagedDisk() { String storageAccountName = generateRandomResourceName("sa", 15); StorageAccount storageAccount = this.storageManager .storageAccounts() .define(storageAccountName) .withRegion(region) .withNewResourceGroup(rgName) .create(); String vm1Name = generateRandomResourceName("vm", 15); VirtualMachine vm1 = this.computeManager 
.virtualMachines() .define(vm1Name) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, CachingTypes.READ_WRITE) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .withExistingStorageAccount(storageAccount) .create(); Disk vm1OSDisk = this.computeManager.disks().getById(vm1.osDiskId()); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_PLATFORM_KEY, vm1OSDisk.encryption().type()); String vaultName = generateRandomResourceName("vault", 15); Vault vault = this.keyVaultManager .vaults() .define(vaultName) .withRegion(region) .withExistingResourceGroup(rgName) .defineAccessPolicy() .forServicePrincipal(clientIdFromFile()) .allowKeyPermissions(KeyPermissions.CREATE) .attach() .create(); String keyName = generateRandomResourceName("key", 15); Key key = vault.keys() .define(keyName) .withKeyTypeToCreate(KeyType.RSA) .withKeySize(4096) .create(); String desName = generateRandomResourceName("des", 15); DiskEncryptionSet des = this.computeManager.diskEncryptionSets() .define(desName) .withRegion(region) .withExistingResourceGroup(rgName) .withEncryptionType(DiskEncryptionSetType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY) .withExistingKeyVault(vault.id()) .withExistingKey(key.id()) .withSystemAssignedManagedServiceIdentity() .create(); vault.update() .defineAccessPolicy() .forObjectId(des.systemAssignedManagedServiceIdentityPrincipalId()) .allowKeyPermissions(KeyPermissions.GET, KeyPermissions.UNWRAP_KEY, KeyPermissions.WRAP_KEY) .attach() .withPurgeProtectionEnabled() .apply(); String vm2Name = generateRandomResourceName("vm", 15); VirtualMachine vm2 = this.computeManager.virtualMachines() .define(vm2Name) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/24") 
.withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withOSDiskDiskEncryptionSet(des.id()) .withOSDiskDeleteOptions(DeleteOptions.DETACH) .create(); String vm2OSDiskId = vm2.osDiskId(); this.computeManager.virtualMachines().deleteById(vm2.id()); Disk vm2OSDisk = this.computeManager.disks().getById(vm2OSDiskId); Assertions.assertEquals(EncryptionType.ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY, vm2OSDisk.encryption().type()); vm1.deallocate(); vm1.update() .withOSDisk(vm2OSDiskId) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm2OSDiskId); Assertions.assertTrue(des.id().equalsIgnoreCase(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet().id())); vm1.deallocate(); vm1.update() .withOSDisk(vm1OSDisk) .apply(); vm1.start(); vm1.refresh(); Assertions.assertEquals(vm1.osDiskId(), vm1OSDisk.id()); Assertions.assertNull(vm1.storageProfile().osDisk().managedDisk().diskEncryptionSet()); } @Test public void canCRUDTrustedLaunchVM() { VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(Region.US_WEST3) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withTrustedLaunch() .withSecureBoot() .withVTpm() .withSize(VirtualMachineSizeTypes.STANDARD_DS1_V2) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertTrue(vm.isSecureBootEnabled()); Assertions.assertTrue(vm.isVTpmEnabled()); vm.update() .withoutSecureBoot() .withoutVTpm() .applyAsync() .flatMap(VirtualMachine::restartAsync) .block(); 
ResourceManagerUtils.sleep(Duration.ofMinutes(1)); vm = computeManager.virtualMachines().getById(vm.id()); Assertions.assertEquals(SecurityTypes.TRUSTED_LAUNCH, vm.securityType()); Assertions.assertFalse(vm.isSecureBootEnabled()); Assertions.assertFalse(vm.isVTpmEnabled()); computeManager.virtualMachines().deleteById(vm.id()); } @Test public void canUpdateDeleteOptions() { String networkName = generateRandomResourceName("network", 15); String nicName = generateRandomResourceName("nic", 15); String nicName2 = generateRandomResourceName("nic", 15); Network network = this .networkManager .networks() .define(networkName) .withRegion(region) .withNewResourceGroup(rgName) .withAddressSpace("10.0.1.0/24") .withSubnet("subnet1", "10.0.1.0/28") .withSubnet("subnet2", "10.0.1.16/28") .create(); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_20_04_LTS_GEN2) .withRootUsername("Foo12") .withSsh(sshPublicKey()) .withNewDataDisk(10, 1, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DELETE)) .withSize(VirtualMachineSizeTypes.STANDARD_DS3_V2) .withNewSecondaryNetworkInterface(this .networkManager .networkInterfaces() .define(nicName) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet1") .withPrimaryPrivateIPAddressDynamic(), DeleteOptions.DELETE) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withOSDiskDeleteOptions(DeleteOptions.DELETE) .create(); Assertions.assertEquals(DeleteOptions.DELETE, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> 
DeleteOptions.DELETE.equals(disk.deleteOptions()))); vm.update() .withOsDiskDeleteOptions(DeleteOptions.DETACH) .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DETACH) .withDataDisksDeleteOptions(DeleteOptions.DETACH, 1) .apply(); Assertions.assertEquals(DeleteOptions.DETACH, vm.osDiskDeleteOptions()); Assertions.assertEquals(DeleteOptions.DETACH, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream() .filter(nicId -> !nicId.equals(vm.primaryNetworkInterfaceId())).allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DETACH.equals(disk.deleteOptions()))); NetworkInterface secondaryNic2 = this .networkManager .networkInterfaces() .define(nicName2) .withRegion(region) .withExistingResourceGroup(rgName) .withExistingPrimaryNetwork(network) .withSubnet("subnet2") .withPrimaryPrivateIPAddressDynamic() .create(); vm.powerOff(); vm.deallocate(); vm.update() .withNewDataDisk(1, 2, new VirtualMachineDiskOptions().withDeleteOptions(DeleteOptions.DETACH)) .withExistingSecondaryNetworkInterface(secondaryNic2) .apply(); vm.update() .withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions.DELETE) .withDataDisksDeleteOptions(DeleteOptions.DELETE, new ArrayList<>(vm.dataDisks().keySet()).toArray(new Integer[0])) .withNetworkInterfacesDeleteOptions( DeleteOptions.DELETE, vm.networkInterfaceIds().stream().filter(nic -> !nic.equals(vm.primaryNetworkInterfaceId())).toArray(String[]::new)) .apply(); Assertions.assertEquals(DeleteOptions.DELETE, vm.primaryNetworkInterfaceDeleteOptions()); Assertions.assertTrue(vm.networkInterfaceIds().stream().allMatch(nicId -> DeleteOptions.DELETE.equals(vm.networkInterfaceDeleteOptions(nicId)))); Assertions.assertTrue(vm.dataDisks().values().stream().allMatch(disk -> DeleteOptions.DELETE.equals(disk.deleteOptions()))); } @Test public void testListVmByVmssId() { String vmssName = 
generateRandomResourceName("vmss", 15); String vmName = generateRandomResourceName("vm", 15); String vmName2 = generateRandomResourceName("vm", 15); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withNewResourceGroup(rgName) .withFlexibleOrchestrationMode() .create(); Assertions.assertEquals(0, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); VirtualMachine vm = computeManager.virtualMachines() .define(vmName) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.0/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withExistingVirtualMachineScaleSet(vmss) .create(); Assertions.assertNotNull(vm.virtualMachineScaleSetId()); VirtualMachine vm2 = computeManager.virtualMachines() .define(vmName2) .withRegion(region) .withExistingResourceGroup(rgName) .withNewPrimaryNetwork("10.0.0.16/28") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Assertions.assertNull(vm2.virtualMachineScaleSetId()); Assertions.assertEquals(1, computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().count()); Assertions.assertTrue(vm.id().equalsIgnoreCase(computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()).stream().iterator().next().id())); Assertions.assertEquals(2, computeManager.virtualMachines().listByResourceGroup(rgName).stream().count()); } @Test @DoNotRecord(skipInPlayback = true) @Disabled("This test is for listByVirtualMachineScaleSetId nextLink encoding. 
Backend pageSize may change, so we don't want to assert that.") public void testListByVmssIdNextLink() throws Exception { String vmssName = generateRandomResourceName("vmss", 15); String vnetName = generateRandomResourceName("vnet", 15); String vmName = generateRandomResourceName("vm", 15); int vmssCapacity = 70; computeManager.virtualMachines() .define(vmName) .withRegion(region) .withNewResourceGroup(rgName) .withNewPrimaryNetwork("10.0.1.0/24") .withPrimaryPrivateIPAddressDynamic() .withoutPrimaryPublicIPAddress() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .create(); Network network = networkManager.networks().define(vnetName) .withRegion(region) .withExistingResourceGroup(rgName) .withAddressSpace("10.0.0.0/24") .withSubnet("subnet1", "10.0.0.0/24") .create(); LoadBalancer publicLoadBalancer = createHttpLoadBalancers(region, this.resourceManager.resourceGroups().getByName(rgName), "1", LoadBalancerSkuType.STANDARD, PublicIPSkuType.STANDARD, true); VirtualMachineScaleSet vmss = computeManager.virtualMachineScaleSets() .define(vmssName) .withRegion(region) .withExistingResourceGroup(rgName) .withFlexibleOrchestrationMode() .withSku(VirtualMachineScaleSetSkuTypes.STANDARD_A0) .withExistingPrimaryNetworkSubnet(network, "subnet1") .withExistingPrimaryInternetFacingLoadBalancer(publicLoadBalancer) .withoutPrimaryInternalLoadBalancer() .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("jvuser") .withSsh(sshPublicKey()) .withCapacity(vmssCapacity) .create(); PagedIterable<VirtualMachine> vmPaged = computeManager.virtualMachines().listByVirtualMachineScaleSetId(vmss.id()); Iterable<PagedResponse<VirtualMachine>> vmIterable = vmPaged.iterableByPage(); int pageCount = 0; for (PagedResponse<VirtualMachine> response : vmIterable) { pageCount++; Assertions.assertEquals(200, response.getStatusCode()); } Assertions.assertEquals(vmssCapacity, 
vmPaged.stream().count()); Assertions.assertEquals(2, pageCount); } private CreatablesInfo prepareCreatableVirtualMachines( Region region, String vmNamePrefix, String networkNamePrefix, String publicIpNamePrefix, int vmCount) { Creatable<ResourceGroup> resourceGroupCreatable = resourceManager.resourceGroups().define(rgName).withRegion(region); Creatable<StorageAccount> storageAccountCreatable = storageManager .storageAccounts() .define(generateRandomResourceName("stg", 20)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); List<String> networkCreatableKeys = new ArrayList<>(); List<String> publicIpCreatableKeys = new ArrayList<>(); List<Creatable<VirtualMachine>> virtualMachineCreatables = new ArrayList<>(); for (int i = 0; i < vmCount; i++) { Creatable<Network> networkCreatable = networkManager .networks() .define(String.format("%s-%d", networkNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withAddressSpace("10.0.0.0/28"); networkCreatableKeys.add(networkCreatable.key()); Creatable<PublicIpAddress> publicIPAddressCreatable = networkManager .publicIpAddresses() .define(String.format("%s-%d", publicIpNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable); publicIpCreatableKeys.add(publicIPAddressCreatable.key()); Creatable<VirtualMachine> virtualMachineCreatable = computeManager .virtualMachines() .define(String.format("%s-%d", vmNamePrefix, i)) .withRegion(region) .withNewResourceGroup(resourceGroupCreatable) .withNewPrimaryNetwork(networkCreatable) .withPrimaryPrivateIPAddressDynamic() .withNewPrimaryPublicIPAddress(publicIPAddressCreatable) .withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS) .withRootUsername("tirekicker") .withSsh(sshPublicKey()) .withUnmanagedDisks() .withNewStorageAccount(storageAccountCreatable); virtualMachineCreatables.add(virtualMachineCreatable); } CreatablesInfo creatablesInfo = new CreatablesInfo(); 
creatablesInfo.virtualMachineCreatables = virtualMachineCreatables; creatablesInfo.networkCreatableKeys = networkCreatableKeys; creatablesInfo.publicIpCreatableKeys = publicIpCreatableKeys; return creatablesInfo; } class CreatablesInfo { private List<Creatable<VirtualMachine>> virtualMachineCreatables; List<String> networkCreatableKeys; List<String> publicIpCreatableKeys; } }
"net.peer.name" is part of the to be ignored standard metrics prefixes.
public void testAttributesOnCustomMetric() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); DoubleCounter counter = meter.counterBuilder("testAttributes").ofDoubles().build(); Attributes attributes = Attributes.builder() .put(NET_PEER_NAME, "example.io") .put(AttributeKey.stringKey("foo"), "bar") .build(); counter.add(1, attributes); meterProvider.forceFlush(); List<MetricData> metricDatas = inMemoryMetricExporter.getFinishedMetricItems(); MetricData metric = metricDatas.get(0); PointData pointData = metric.getData().getPoints().stream().findFirst().get(); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder( builder, metric, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); Map<String, String> properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(1); assertThat(properties.get(NET_PEER_NAME.getKey())).isNull(); assertThat(properties.get("foo")).isEqualTo("bar"); }
assertThat(properties.get(NET_PEER_NAME.getKey())).isNull();
public void testAttributesOnCustomMetric() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); DoubleCounter counter = meter.counterBuilder("testAttributes").ofDoubles().build(); Attributes attributes = Attributes.builder() .put(NET_PEER_NAME, "example.io") .put(AttributeKey.stringKey("foo"), "bar") .build(); counter.add(1, attributes); meterProvider.forceFlush(); List<MetricData> metricDatas = inMemoryMetricExporter.getFinishedMetricItems(); MetricData metric = metricDatas.get(0); PointData pointData = metric.getData().getPoints().stream().findFirst().get(); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder( builder, metric, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); Map<String, String> properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(2); assertThat(properties.get(NET_PEER_NAME.getKey())).isEqualTo("example.io"); assertThat(properties.get("foo")).isEqualTo("bar"); }
class AzureMonitorMetricExporterTest { @Test public void testDoubleCounter() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); DoubleCounter counter = meter.counterBuilder("testDoubleCounter").ofDoubles().build(); counter.add(3.1415); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder( builder, metricDataList.get(0), pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getValue()).isEqualTo(3.1415); } assertThat(metricData.getType()).isEqualTo(DOUBLE_SUM); assertThat(metricData.getName()).isEqualTo("testDoubleCounter"); } @Test public void testDoubleGauge() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); meter .gaugeBuilder("testDoubleGauge") .setDescription("the current temperature") .setUnit("C") .buildWithCallback( m -> m.record(20.0, Attributes.of(AttributeKey.stringKey("thing"), "engine"))); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); for 
(PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getValue()).isEqualTo(20.0); assertThat(metricsData.getProperties().size()).isEqualTo(1); assertThat(metricsData.getProperties()).containsEntry("thing", "engine"); } assertThat(metricData.getType()).isEqualTo(DOUBLE_GAUGE); assertThat(metricData.getName()).isEqualTo("testDoubleGauge"); } @Test @Test public void testAttributesOnStandardMetric() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); DoubleHistogram serverDuration = meter.histogramBuilder("http.server.duration").build(); Attributes attributes = Attributes.builder() .put(HTTP_STATUS_CODE, 200) .put(NET_HOST_NAME, "example.io") .put(AttributeKey.stringKey("foo"), "baz") .build(); serverDuration.record(0.1, attributes); meterProvider.forceFlush(); List<MetricData> metricDatas = inMemoryMetricExporter.getFinishedMetricItems(); MetricData metric = metricDatas.get(0); PointData pointData = metric.getData().getPoints().stream().findFirst().get(); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder( builder, metric, pointData, true, true); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); Map<String, String> properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(5); 
assertThat(properties.get("operation/synthetic")).isEqualTo("False"); assertThat(properties.get("Request.Success")).isEqualTo("True"); assertThat(properties.get("request/resultCode")).isEqualTo("200"); assertThat(properties.get("_MS.IsAutocollected")).isEqualTo("True"); assertThat(properties.get("_MS.MetricId")).isEqualTo("requests/duration"); } @Test public void testLongCounter() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); LongCounter counter = meter.counterBuilder("testLongCounter").build(); counter.add( 1, Attributes.of( AttributeKey.stringKey("name"), "apple", AttributeKey.stringKey("color"), "red")); counter.add( 2, Attributes.of( AttributeKey.stringKey("name"), "lemon", AttributeKey.stringKey("color"), "yellow")); counter.add( 1, Attributes.of( AttributeKey.stringKey("name"), "lemon", AttributeKey.stringKey("color"), "yellow")); counter.add( 2, Attributes.of( AttributeKey.stringKey("name"), "apple", AttributeKey.stringKey("color"), "green")); counter.add( 5, Attributes.of( AttributeKey.stringKey("name"), "apple", AttributeKey.stringKey("color"), "red")); counter.add( 4, Attributes.of( AttributeKey.stringKey("name"), "lemon", AttributeKey.stringKey("color"), "yellow")); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); @SuppressWarnings("unchecked") Collection<LongPointData> points = (Collection<LongPointData>) metricData.getData().getPoints(); assertThat(points.size()).isEqualTo(3); points = points.stream() .sorted(Comparator.comparing(LongPointData::getValue)) .collect(Collectors.toList()); Iterator<LongPointData> iterator = points.iterator(); LongPointData 
longPointData1 = iterator.next(); assertThat(longPointData1.getValue()).isEqualTo(2L); assertThat(longPointData1.getAttributes().get(AttributeKey.stringKey("name"))) .isEqualTo("apple"); assertThat(longPointData1.getAttributes().get(AttributeKey.stringKey("color"))) .isEqualTo("green"); LongPointData longPointData2 = iterator.next(); assertThat(longPointData2.getValue()).isEqualTo(6L); assertThat(longPointData2.getAttributes().get(AttributeKey.stringKey("name"))) .isEqualTo("apple"); assertThat(longPointData2.getAttributes().get(AttributeKey.stringKey("color"))) .isEqualTo("red"); LongPointData longPointData3 = iterator.next(); assertThat(longPointData3.getValue()).isEqualTo(7L); assertThat(longPointData3.getAttributes().get(AttributeKey.stringKey("name"))) .isEqualTo("lemon"); assertThat(longPointData3.getAttributes().get(AttributeKey.stringKey("color"))) .isEqualTo("yellow"); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, longPointData1, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); MetricDataPoint metricDataPoint = metricsData.getMetrics().get(0); assertThat(metricDataPoint.getValue()).isEqualTo(2); Map<String, String> properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(2); assertThat(properties).containsEntry("name", "apple"); assertThat(properties).containsEntry("color", "green"); builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, longPointData2, true, false); metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); metricDataPoint = metricsData.getMetrics().get(0); assertThat(metricDataPoint.getValue()).isEqualTo(6); properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(2); 
assertThat(properties).containsEntry("name", "apple"); assertThat(properties).containsEntry("color", "red"); builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, longPointData3, true, false); metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); metricDataPoint = metricsData.getMetrics().get(0); assertThat(metricDataPoint.getValue()).isEqualTo(7); properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(2); assertThat(properties).containsEntry("name", "lemon"); assertThat(properties).containsEntry("color", "yellow"); assertThat(metricData.getType()).isEqualTo(LONG_SUM); assertThat(metricData.getName()).isEqualTo("testLongCounter"); } @Test public void testLongGauge() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); meter .gaugeBuilder("testLongGauge") .ofLongs() .setDescription("the current temperature") .setUnit("C") .buildWithCallback( m -> { m.record(20, Attributes.of(AttributeKey.stringKey("thing"), "engine")); }); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getValue()).isEqualTo(20); 
assertThat(metricsData.getProperties().size()).isEqualTo(1); assertThat(metricsData.getProperties()).containsEntry("thing", "engine"); } assertThat(metricData.getType()).isEqualTo(LONG_GAUGE); assertThat(metricData.getName()).isEqualTo("testLongGauge"); } @Test public void testDoubleHistogram() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); DoubleHistogram doubleHistogram = meter .histogramBuilder("testDoubleHistogram") .setDescription("http.client.duration") .setUnit("ms") .build(); doubleHistogram.record(25.45); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); assertThat(metricData.getData().getPoints().size()).isEqualTo(1); PointData pointData = metricData.getData().getPoints().iterator().next(); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getCount()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getValue()).isEqualTo(25.45); assertThat(metricsData.getProperties()).isNull(); assertThat(metricsData.getMetrics().get(0).getMax()).isEqualTo(25.45); assertThat(metricsData.getMetrics().get(0).getMin()).isEqualTo(25.45); assertThat(metricData.getType()).isEqualTo(HISTOGRAM); assertThat(metricData.getName()).isEqualTo("testDoubleHistogram"); } @Test public void testNoAttributeWithPrefixApplicationInsightsInternal() { InMemoryMetricExporter inMemoryMetricExporter = 
InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); LongCounter longCounter = meter.counterBuilder("testLongCounter") .setDescription("testLongCounter") .setUnit("1").build(); Attributes attributes = Attributes.of(AttributeKey.stringKey("applicationinsights.internal.test"), "test", AttributeKey.stringKey("foo"), "bar"); longCounter.add(1, attributes); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); assertThat(metricData.getData().getPoints().size()).isEqualTo(1); PointData pointData = metricData.getData().getPoints().iterator().next(); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getProperties()).isNotNull(); assertThat(metricsData.getProperties().size()).isEqualTo(1); assertThat(metricsData.getProperties()).containsExactly(entry("foo", "bar")); assertThat(metricsData.getProperties().get("applicationinsights.internal.test")).isNull(); assertThat(metricData.getType()).isEqualTo(LONG_SUM); assertThat(metricData.getName()).isEqualTo("testLongCounter"); } }
class AzureMonitorMetricExporterTest { @Test public void testDoubleCounter() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); DoubleCounter counter = meter.counterBuilder("testDoubleCounter").ofDoubles().build(); counter.add(3.1415); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder( builder, metricDataList.get(0), pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getValue()).isEqualTo(3.1415); } assertThat(metricData.getType()).isEqualTo(DOUBLE_SUM); assertThat(metricData.getName()).isEqualTo("testDoubleCounter"); } @Test public void testDoubleGauge() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); meter .gaugeBuilder("testDoubleGauge") .setDescription("the current temperature") .setUnit("C") .buildWithCallback( m -> m.record(20.0, Attributes.of(AttributeKey.stringKey("thing"), "engine"))); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); for 
(PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getValue()).isEqualTo(20.0); assertThat(metricsData.getProperties().size()).isEqualTo(1); assertThat(metricsData.getProperties()).containsEntry("thing", "engine"); } assertThat(metricData.getType()).isEqualTo(DOUBLE_GAUGE); assertThat(metricData.getName()).isEqualTo("testDoubleGauge"); } @Test @Test public void testAttributesOnStandardMetric() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); DoubleHistogram serverDuration = meter.histogramBuilder("http.server.duration").build(); Attributes attributes = Attributes.builder() .put(HTTP_STATUS_CODE, 200) .put(NET_HOST_NAME, "example.io") .put(AttributeKey.stringKey("foo"), "baz") .build(); serverDuration.record(0.1, attributes); meterProvider.forceFlush(); List<MetricData> metricDatas = inMemoryMetricExporter.getFinishedMetricItems(); MetricData metric = metricDatas.get(0); PointData pointData = metric.getData().getPoints().stream().findFirst().get(); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder( builder, metric, pointData, true, true); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); Map<String, String> properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(5); 
assertThat(properties.get("operation/synthetic")).isEqualTo("False"); assertThat(properties.get("Request.Success")).isEqualTo("True"); assertThat(properties.get("request/resultCode")).isEqualTo("200"); assertThat(properties.get("_MS.IsAutocollected")).isEqualTo("True"); assertThat(properties.get("_MS.MetricId")).isEqualTo("requests/duration"); } @Test public void testLongCounter() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); LongCounter counter = meter.counterBuilder("testLongCounter").build(); counter.add( 1, Attributes.of( AttributeKey.stringKey("name"), "apple", AttributeKey.stringKey("color"), "red")); counter.add( 2, Attributes.of( AttributeKey.stringKey("name"), "lemon", AttributeKey.stringKey("color"), "yellow")); counter.add( 1, Attributes.of( AttributeKey.stringKey("name"), "lemon", AttributeKey.stringKey("color"), "yellow")); counter.add( 2, Attributes.of( AttributeKey.stringKey("name"), "apple", AttributeKey.stringKey("color"), "green")); counter.add( 5, Attributes.of( AttributeKey.stringKey("name"), "apple", AttributeKey.stringKey("color"), "red")); counter.add( 4, Attributes.of( AttributeKey.stringKey("name"), "lemon", AttributeKey.stringKey("color"), "yellow")); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); @SuppressWarnings("unchecked") Collection<LongPointData> points = (Collection<LongPointData>) metricData.getData().getPoints(); assertThat(points.size()).isEqualTo(3); points = points.stream() .sorted(Comparator.comparing(LongPointData::getValue)) .collect(Collectors.toList()); Iterator<LongPointData> iterator = points.iterator(); LongPointData 
longPointData1 = iterator.next(); assertThat(longPointData1.getValue()).isEqualTo(2L); assertThat(longPointData1.getAttributes().get(AttributeKey.stringKey("name"))) .isEqualTo("apple"); assertThat(longPointData1.getAttributes().get(AttributeKey.stringKey("color"))) .isEqualTo("green"); LongPointData longPointData2 = iterator.next(); assertThat(longPointData2.getValue()).isEqualTo(6L); assertThat(longPointData2.getAttributes().get(AttributeKey.stringKey("name"))) .isEqualTo("apple"); assertThat(longPointData2.getAttributes().get(AttributeKey.stringKey("color"))) .isEqualTo("red"); LongPointData longPointData3 = iterator.next(); assertThat(longPointData3.getValue()).isEqualTo(7L); assertThat(longPointData3.getAttributes().get(AttributeKey.stringKey("name"))) .isEqualTo("lemon"); assertThat(longPointData3.getAttributes().get(AttributeKey.stringKey("color"))) .isEqualTo("yellow"); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, longPointData1, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); MetricDataPoint metricDataPoint = metricsData.getMetrics().get(0); assertThat(metricDataPoint.getValue()).isEqualTo(2); Map<String, String> properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(2); assertThat(properties).containsEntry("name", "apple"); assertThat(properties).containsEntry("color", "green"); builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, longPointData2, true, false); metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); metricDataPoint = metricsData.getMetrics().get(0); assertThat(metricDataPoint.getValue()).isEqualTo(6); properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(2); 
assertThat(properties).containsEntry("name", "apple"); assertThat(properties).containsEntry("color", "red"); builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, longPointData3, true, false); metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); metricDataPoint = metricsData.getMetrics().get(0); assertThat(metricDataPoint.getValue()).isEqualTo(7); properties = metricsData.getProperties(); assertThat(properties.size()).isEqualTo(2); assertThat(properties).containsEntry("name", "lemon"); assertThat(properties).containsEntry("color", "yellow"); assertThat(metricData.getType()).isEqualTo(LONG_SUM); assertThat(metricData.getName()).isEqualTo("testLongCounter"); } @Test public void testLongGauge() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); meter .gaugeBuilder("testLongGauge") .ofLongs() .setDescription("the current temperature") .setUnit("C") .buildWithCallback( m -> { m.record(20, Attributes.of(AttributeKey.stringKey("thing"), "engine")); }); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); for (PointData pointData : metricData.getData().getPoints()) { MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getValue()).isEqualTo(20); 
assertThat(metricsData.getProperties().size()).isEqualTo(1); assertThat(metricsData.getProperties()).containsEntry("thing", "engine"); } assertThat(metricData.getType()).isEqualTo(LONG_GAUGE); assertThat(metricData.getName()).isEqualTo("testLongGauge"); } @Test public void testDoubleHistogram() { InMemoryMetricExporter inMemoryMetricExporter = InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); DoubleHistogram doubleHistogram = meter .histogramBuilder("testDoubleHistogram") .setDescription("http.client.duration") .setUnit("ms") .build(); doubleHistogram.record(25.45); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); assertThat(metricData.getData().getPoints().size()).isEqualTo(1); PointData pointData = metricData.getData().getPoints().iterator().next(); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getCount()).isEqualTo(1); assertThat(metricsData.getMetrics().get(0).getValue()).isEqualTo(25.45); assertThat(metricsData.getProperties()).isNull(); assertThat(metricsData.getMetrics().get(0).getMax()).isEqualTo(25.45); assertThat(metricsData.getMetrics().get(0).getMin()).isEqualTo(25.45); assertThat(metricData.getType()).isEqualTo(HISTOGRAM); assertThat(metricData.getName()).isEqualTo("testDoubleHistogram"); } @Test public void testNoAttributeWithPrefixApplicationInsightsInternal() { InMemoryMetricExporter inMemoryMetricExporter = 
InMemoryMetricExporter.create(); SdkMeterProvider meterProvider = SdkMeterProvider.builder() .registerMetricReader( PeriodicMetricReader.builder(inMemoryMetricExporter).build()) .build(); Meter meter = meterProvider.get("AzureMonitorMetricExporterTest"); LongCounter longCounter = meter.counterBuilder("testLongCounter") .setDescription("testLongCounter") .setUnit("1").build(); Attributes attributes = Attributes.of(AttributeKey.stringKey("applicationinsights.internal.test"), "test", AttributeKey.stringKey("foo"), "bar"); longCounter.add(1, attributes); meterProvider.forceFlush(); List<MetricData> metricDataList = inMemoryMetricExporter.getFinishedMetricItems(); assertThat(metricDataList).hasSize(1); MetricData metricData = metricDataList.get(0); assertThat(metricData.getData().getPoints().size()).isEqualTo(1); PointData pointData = metricData.getData().getPoints().iterator().next(); MetricTelemetryBuilder builder = MetricTelemetryBuilder.create(); MetricDataMapper.updateMetricPointBuilder(builder, metricData, pointData, true, false); MetricsData metricsData = (MetricsData) builder.build().getData().getBaseData(); assertThat(metricsData.getMetrics().size()).isEqualTo(1); assertThat(metricsData.getProperties()).isNotNull(); assertThat(metricsData.getProperties().size()).isEqualTo(1); assertThat(metricsData.getProperties()).containsExactly(entry("foo", "bar")); assertThat(metricsData.getProperties().get("applicationinsights.internal.test")).isNull(); assertThat(metricData.getType()).isEqualTo(LONG_SUM); assertThat(metricData.getName()).isEqualTo("testLongCounter"); } }
```suggestion return ORIGINAL_CONTENT.checkMatch( ORIGINAL_CONTENT.copySeekableByteChannelToFluxByteBuffer(channel), span).block().booleanValue(); ```
protected boolean runInternal(Context span) { try { BlobSeekableByteChannelReadResult result = syncClient.openSeekableByteChannelRead( new BlobSeekableByteChannelReadOptions(), null); SeekableByteChannel channel = result.getChannel(); return Boolean.TRUE.equals(ORIGINAL_CONTENT.checkMatch( ORIGINAL_CONTENT.copySeekableByteChannelToFluxByteBuffer(channel), span).block()); } catch (Exception e) { LOGGER.error("Failed to download blob with open seekable byte channel", e); return false; } }
ORIGINAL_CONTENT.copySeekableByteChannelToFluxByteBuffer(channel), span).block());
protected boolean runInternal(Context span) { try { BlobSeekableByteChannelReadResult result = syncClient.openSeekableByteChannelRead( new BlobSeekableByteChannelReadOptions(), null); SeekableByteChannel channel = result.getChannel(); return ORIGINAL_CONTENT.checkMatch(BinaryData.fromStream(Channels.newInputStream(channel)), span).block().booleanValue(); } catch (Exception e) { LOGGER.error("Failed to download blob with open seekable byte channel", e); return false; } }
class OpenSeekableByteChannelRead extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(OpenSeekableByteChannelRead.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(OpenSeekableByteChannelRead.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncNoFaultClient; public OpenSeekableByteChannelRead(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return null; } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
class OpenSeekableByteChannelRead extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(OpenSeekableByteChannelRead.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(OpenSeekableByteChannelRead.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncNoFaultClient; public OpenSeekableByteChannelRead(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return monoError(LOGGER, new RuntimeException("openSeekableByteChannelRead() does not exist on the async client")); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
```suggestion return monoError(LOGGER, new NotSupportedException("openSeekableByteChannelRead() does not exist on the async client")) ```
protected Mono<Boolean> runInternalAsync(Context span) { return null; }
return null;
protected Mono<Boolean> runInternalAsync(Context span) { return monoError(LOGGER, new RuntimeException("openSeekableByteChannelRead() does not exist on the async client")); }
class OpenSeekableByteChannelRead extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(OpenSeekableByteChannelRead.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(OpenSeekableByteChannelRead.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncNoFaultClient; public OpenSeekableByteChannelRead(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); } @Override protected boolean runInternal(Context span) { try { BlobSeekableByteChannelReadResult result = syncClient.openSeekableByteChannelRead( new BlobSeekableByteChannelReadOptions(), null); SeekableByteChannel channel = result.getChannel(); return Boolean.TRUE.equals(ORIGINAL_CONTENT.checkMatch( ORIGINAL_CONTENT.copySeekableByteChannelToFluxByteBuffer(channel), span).block()); } catch (Exception e) { LOGGER.error("Failed to download blob with open seekable byte channel", e); return false; } } @Override @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
class OpenSeekableByteChannelRead extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(OpenSeekableByteChannelRead.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(OpenSeekableByteChannelRead.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncNoFaultClient; public OpenSeekableByteChannelRead(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); } @Override protected boolean runInternal(Context span) { try { BlobSeekableByteChannelReadResult result = syncClient.openSeekableByteChannelRead( new BlobSeekableByteChannelReadOptions(), null); SeekableByteChannel channel = result.getChannel(); return ORIGINAL_CONTENT.checkMatch(BinaryData.fromStream(Channels.newInputStream(channel)), span).block().booleanValue(); } catch (Exception e) { LOGGER.error("Failed to download blob with open seekable byte channel", e); return false; } } @Override @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
When I implement your suggestion or remove the Boolean.TRUE.equals, I get the following warning: **Unboxing of 'ORIGINAL_CONTENT.checkMatch(syncClient.downloadContent().toFluxByteBuffer(), span).block()' may produce 'NullPointerException'**
protected boolean runInternal(Context span) { try { return Boolean.TRUE.equals(ORIGINAL_CONTENT.checkMatch(syncClient.downloadContent().toFluxByteBuffer(), span).block()); } catch (Exception e) { LOGGER.error("Failed to download blob", e); return false; } }
span).block());
protected boolean runInternal(Context span) { try { return ORIGINAL_CONTENT.checkMatch(syncClient.downloadContent(), span).block().booleanValue(); } catch (Exception e) { LOGGER.error("Failed to download blob", e); return false; } }
class DownloadContent extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(DownloadContent.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadContent.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncClient; private final BlobAsyncClient asyncNoFaultClient; public DownloadContent(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return asyncClient.downloadContent().flatMap(response -> ORIGINAL_CONTENT.checkMatch(response.toFluxByteBuffer(), span)); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
class DownloadContent extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(DownloadContent.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadContent.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncClient; private final BlobAsyncClient asyncNoFaultClient; public DownloadContent(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return asyncClient.downloadContent().flatMap(response -> ORIGINAL_CONTENT.checkMatch(response, span)); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
no objections on doing `toString()` later, but the comment is not accurate - this is behind `isEnabled`, which is populated based on the log level
public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; }
public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. 
*/ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. * This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 
0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. 
*/ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. */ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). 
*/ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. 
*/ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. * This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 
0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. 
*/ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. */ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). 
*/ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
why `logger.isDebugEnabled() ? throwable : null`? Logging consumers should (and do) decide what they do with exception depending on log level/other config.
public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } }
logger.isDebugEnabled() ? throwable : null);
public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. */ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. 
* This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... 
args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. */ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. 
*/ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). */ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. */ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. 
* This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... 
args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. */ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. 
*/ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). */ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
same - what to do with an exception object is consumer, not producer decision
public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; }
performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null);
public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. */ /** * Logs the {@link RuntimeException} and returns it to be thrown. 
* This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... 
args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. */ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. 
*/ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). */ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. */ /** * Logs the {@link RuntimeException} and returns it to be thrown. 
* This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... 
args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. */ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. 
*/ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). */ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
great catch!
static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); }
static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. 
*/ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. * This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 
0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. 
*/ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. */ private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). 
*/ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. 
*/ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. * This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 
0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. 
*/ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. */ private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). 
*/ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
True that this is behind the enable flag but if someone forgets to actually call log this does waste cycles.
public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; }
public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. 
*/ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. * This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 
0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. 
*/ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. */ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). 
*/ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. 
*/ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. * This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 
0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. 
*/ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. */ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). 
*/ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
In main right now, if debug logging is not enabled, null is passed for the throwable. It calls through this: https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/core/azure-core/src/main/java/com/azure/core/util/logging/LoggingEventBuilder.java#L351, where on line 346 you can see the call to `removeThrowable`. We then log with `tuple.getThrowable()`, which will be null.
public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } }
logger.isDebugEnabled() ? throwable : null);
public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. */ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. 
* This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... 
args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. */ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. 
*/ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). */ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. */ public Throwable log(Throwable throwable) { Objects.requireNonNull(throwable, "'throwable' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null); } return throwable; } /** * Logs the {@link RuntimeException} and returns it to be thrown. 
* This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... 
args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. */ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. 
*/ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). */ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
Same comment as the other case
/**
 * Logs the given {@link Throwable} and hands it back so the caller can throw it.
 *
 * @param throwable Throwable to be logged and returned.
 * @return The same {@link Throwable} instance.
 * @throws NullPointerException If {@code throwable} is {@code null}.
 */
public Throwable log(Throwable throwable) {
    Objects.requireNonNull(throwable, "'throwable' cannot be null.");
    if (isEnabled) {
        // Attach the stack trace only when verbose (debug) logging is on;
        // otherwise only the exception message is embedded in the JSON payload.
        Throwable stackTraceToLog = logger.isDebugEnabled() ? throwable : null;
        performLogging(level, getMessageWithContext(null, throwable), stackTraceToLog);
    }
    return throwable;
}
performLogging(level, getMessageWithContext(null, throwable), logger.isDebugEnabled() ? throwable : null);
/**
 * Logs the given {@link Throwable} and hands it back so the caller can throw it.
 *
 * @param throwable Throwable to be logged and returned.
 * @return The same {@link Throwable} instance.
 * @throws NullPointerException If {@code throwable} is {@code null}.
 */
public Throwable log(Throwable throwable) {
    Objects.requireNonNull(throwable, "'throwable' cannot be null.");
    if (isEnabled) {
        // Attach the stack trace only when verbose (debug) logging is on;
        // otherwise only the exception message is embedded in the JSON payload.
        Throwable stackTraceToLog = logger.isDebugEnabled() ? throwable : null;
        performLogging(level, getMessageWithContext(null, throwable), stackTraceToLog);
    }
    return throwable;
}
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. */ /** * Logs the {@link RuntimeException} and returns it to be thrown. 
* This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... 
args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. */ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. 
*/ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). */ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
class LoggingEventBuilder { private static final JsonStringEncoder JSON_STRING_ENCODER = JsonStringEncoder.getInstance(); private static final LoggingEventBuilder NOOP = new LoggingEventBuilder(null, null, null, false); private static final String AZURE_SDK_LOG_MESSAGE_JSON_START = "{\"az.sdk.message\":\""; private final Logger logger; private final LogLevel level; private List<ContextKeyValuePair> context; private final String globalContextCached; private final boolean hasGlobalContext; private final boolean isEnabled; /** * Creates {@code LoggingEventBuilder} for provided level and {@link ClientLogger}. * If level is disabled, returns no-op instance. */ static LoggingEventBuilder create(Logger logger, LogLevel level, String globalContextSerialized, boolean canLogAtLevel) { if (canLogAtLevel) { return new LoggingEventBuilder(logger, level, globalContextSerialized, true); } return NOOP; } private LoggingEventBuilder(Logger logger, LogLevel level, String globalContextSerialized, boolean isEnabled) { this.logger = logger; this.level = level; this.isEnabled = isEnabled; this.globalContextCached = globalContextSerialized == null ? "" : globalContextSerialized; this.hasGlobalContext = !this.globalContextCached.isEmpty(); } /** * Adds key with String value pair to the context of current log being created. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atInfo --> * <pre> * logger.atInfo& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atInfo --> * * @param key String key. * @param value String value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, String value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with Object value to the context of current log being created. 
* If logging is enabled at given level, and object is not null, uses {@code value.toString()} to * serialize object. * * <p><strong>Code samples</strong></p> * * <p>Adding string value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * & * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value Object value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, Object value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with boolean value to the context of current log being created. * * @param key String key. * @param value boolean value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, boolean value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with long value to the context of current log event being created. * * <p><strong>Code samples</strong></p> * * <p>Adding an integer value to logging event context.</p> * * <!-- src_embed com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * <pre> * logger.atVerbose& * .addKeyValue& * .log& * </pre> * <!-- end com.azure.core.util.logging.clientlogger.atverbose.addKeyValue * * @param key String key. * @param value long value. * @return The updated {@code LoggingEventBuilder} object. */ public LoggingEventBuilder addKeyValue(String key, long value) { if (this.isEnabled) { addKeyValueInternal(key, value); } return this; } /** * Adds key with String value supplier to the context of current log event being created. * * @param key String key. * @param valueSupplier String value supplier function. * @return The updated {@code LoggingEventBuilder} object. 
*/ public LoggingEventBuilder addKeyValue(String key, Supplier<String> valueSupplier) { if (this.isEnabled) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, valueSupplier)); } return this; } /** * Logs message annotated with context. * * @param message the message to log. */ public void log(String message) { if (this.isEnabled) { performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. */ public void log(Supplier<String> messageSupplier) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, null), (Throwable) null); } } /** * Logs message annotated with context. * * @param messageSupplier string message supplier. * @param throwable {@link Throwable} for the message. */ public void log(Supplier<String> messageSupplier, Throwable throwable) { if (this.isEnabled) { String message = messageSupplier != null ? messageSupplier.get() : null; performLogging(level, getMessageWithContext(message, throwable), logger.isDebugEnabled() ? throwable : null); } } /** * Logs a format-able message that uses {@code {}} as the placeholder at {@code warning} log level. * * @param format The format-able message to log. * @param args Arguments for the message. If an exception is being logged, the last argument should be the {@link * Throwable}. */ public void log(String format, Object... args) { if (this.isEnabled) { performLogging(level, format, args); } } /** * Logs the {@link Throwable} and returns it to be thrown. * * @param throwable Throwable to be logged and returned. * @return The passed {@link Throwable}. * @throws NullPointerException If {@code throwable} is {@code null}. */ /** * Logs the {@link RuntimeException} and returns it to be thrown. 
* This API covers the cases where a checked exception type needs to be thrown and logged. * * @param runtimeException RuntimeException to be logged and returned. * @return The passed {@link RuntimeException}. * @throws NullPointerException If {@code runtimeException} is {@code null}. */ public RuntimeException log(RuntimeException runtimeException) { Objects.requireNonNull(runtimeException, "'runtimeException' cannot be null."); if (this.isEnabled) { performLogging(level, getMessageWithContext(null, runtimeException), logger.isDebugEnabled() ? runtimeException : null); } return runtimeException; } private String getMessageWithContext(String message, Throwable throwable) { if (message == null) { message = ""; } StringBuilder sb = new StringBuilder(20 + (context == null ? 0 : context.size()) * 20 + message.length() + globalContextCached.length()); sb.append(AZURE_SDK_LOG_MESSAGE_JSON_START); JSON_STRING_ENCODER.quoteAsString(message, sb); sb.append('"'); if (throwable != null) { sb.append(",\"exception\":"); String exceptionMessage = throwable.getMessage(); if (exceptionMessage != null) { sb.append('"'); JSON_STRING_ENCODER.quoteAsString(exceptionMessage, sb); sb.append('"'); } else { sb.append("null"); } } if (hasGlobalContext) { sb.append(',').append(globalContextCached); } if (context != null) { for (ContextKeyValuePair contextKeyValuePair : context) { contextKeyValuePair.write(sb.append(',')); } } sb.append('}'); return sb.toString(); } private void addKeyValueInternal(String key, Object value) { if (this.context == null) { this.context = new ArrayList<>(); } this.context.add(new ContextKeyValuePair(key, value)); } /* * Performs the logging. * * @param format format-able message. * @param args Arguments for the message, if an exception is being logged last argument is the throwable. */ private void performLogging(LogLevel logLevel, String format, Object... 
args) { Throwable throwable = null; if (doesArgsHaveThrowable(args)) { Object throwableObj = args[args.length - 1]; if (throwableObj instanceof Throwable) { throwable = (Throwable) throwableObj; } /* * Environment is logging at a level higher than verbose, strip out the throwable as it would log its * stack trace which is only expected when logging at a verbose level. */ if (!logger.isDebugEnabled()) { args = removeThrowable(args); } } FormattingTuple tuple = MessageFormatter.arrayFormat(format, args); String message = getMessageWithContext(tuple.getMessage(), throwable); performLogging(logLevel, message, tuple.getThrowable()); } private void performLogging(LogLevel logLevel, String message, Throwable throwable) { switch (logLevel) { case VERBOSE: logger.debug(message, throwable); break; case INFORMATIONAL: logger.info(message, throwable); break; case WARNING: logger.warn(message, throwable); break; case ERROR: logger.error(message, throwable); break; default: break; } } /** * Serializes passed map to string containing valid JSON fragment: * e.g. "k1":"v1","k2":"v2", properly escaped and without trailing comma. * <p> * For complex object serialization, it calls {@code toString()} guarded with null check. * * @param context to serialize. * * @return Serialized JSON fragment or an empty string. 
*/ static String writeJsonFragment(Map<String, Object> context) { if (CoreUtils.isNullOrEmpty(context)) { return ""; } StringBuilder formatter = new StringBuilder(context.size() * 20); boolean firstValueWritten = false; for (Map.Entry<String, Object> pair : context.entrySet()) { if (firstValueWritten) { formatter.append(','); } else { firstValueWritten = true; } writeKeyAndValue(pair.getKey(), pair.getValue(), formatter); } return formatter.toString(); } private static void writeKeyAndValue(String key, Object value, StringBuilder formatter) { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(key, formatter); formatter.append("\":"); if (value == null) { formatter.append("null"); } else if (isUnquotedType(value)) { JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); } else { formatter.append('"'); JSON_STRING_ENCODER.quoteAsString(value.toString(), formatter); formatter.append('"'); } } /** * Returns true if the value is an unquoted JSON type (boolean, number, null). */ private static boolean isUnquotedType(Object value) { return value instanceof Boolean || value instanceof Number; } private static final class ContextKeyValuePair { private final String key; private final Object value; private final Supplier<String> valueSupplier; ContextKeyValuePair(String key, Object value) { this.key = key; this.value = value; this.valueSupplier = null; } ContextKeyValuePair(String key, Supplier<String> valueSupplier) { this.key = key; this.value = null; this.valueSupplier = valueSupplier; } /** * Writes "key":"value" json string to provided StringBuilder. */ public void write(StringBuilder formatter) { if (valueSupplier == null) { writeKeyAndValue(key, value, formatter); } else { writeKeyAndValue(key, valueSupplier.get(), formatter); } } } }
Setting this to null might surface as a `NullPointerException` elsewhere; IMO, it would be better to set it to an empty string.
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); }
this.setLatestDatabaseRefreshError(null);
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private AtomicReference<String> latestDatabaseRefreshError = new AtomicReference<>(); public void setLatestDatabaseRefreshError(String latestDatabaseRefreshError) { this.latestDatabaseRefreshError.set(latestDatabaseRefreshError); } public String getLatestDatabaseRefreshError() { return latestDatabaseRefreshError.get(); } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); setLatestDatabaseRefreshError("Unable to refresh database account from any location. Exception: " + ex.toString()); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
I am wondering whether we should track the exception itself instead of the error message, and then later when new RunTimeException(message, throwable)
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); }
this.setLatestDatabaseRefreshError(null);
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private AtomicReference<String> latestDatabaseRefreshError = new AtomicReference<>(); public void setLatestDatabaseRefreshError(String latestDatabaseRefreshError) { this.latestDatabaseRefreshError.set(latestDatabaseRefreshError); } public String getLatestDatabaseRefreshError() { return latestDatabaseRefreshError.get(); } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); setLatestDatabaseRefreshError("Unable to refresh database account from any location. Exception: " + ex.toString()); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
Ok. I made change to track exception, which I think addresses also Kushagra concern?
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); }
this.setLatestDatabaseRefreshError(null);
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) { return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint) .doOnNext(databaseAccount -> { if(databaseAccount != null) { this.latestDatabaseAccount = databaseAccount; this.setLatestDatabaseRefreshError(null); } logger.debug("account retrieved: {}", databaseAccount); }).single(); }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private AtomicReference<String> latestDatabaseRefreshError = new AtomicReference<>(); public void setLatestDatabaseRefreshError(String latestDatabaseRefreshError) { this.latestDatabaseRefreshError.set(latestDatabaseRefreshError); } public String getLatestDatabaseRefreshError() { return latestDatabaseRefreshError.get(); } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); setLatestDatabaseRefreshError("Unable to refresh database account from any location. Exception: " + ex.toString()); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
This code is not thread-safe - getLatestDatabaseRefreshError could have been changed (set to null) between these two invocations - instead capture a snapshot - then you can safely log it and throw it if != null - also the refresh error should be passed as inner exception in both cases.
private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: logger.error(" More error details: "+this.globalEndpointManager.getLatestDatabaseRefreshError()); throw new RuntimeException("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: + " More error details: "+this.globalEndpointManager.getLatestDatabaseRefreshError()); } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }
logger.error(" More error details: "+this.globalEndpointManager.getLatestDatabaseRefreshError());
private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError(); if (databaseRefreshErrorSnapshot != null) { logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot ); throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot); } else { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: } } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private final static List<String> EMPTY_REGION_LIST = Collections.emptyList(); private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor(); private final static ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final static ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor = ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor(); private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>(); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer 
itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxGatewayStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private final ApiType apiType; private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. 
Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final String clientCorrelationId; private final SessionRetryOptions sessionRetryOptions; private final boolean sessionCapturingOverrideEnabled; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, 
cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String 
clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length == 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? 
BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { assert(clientTelemetryConfig != null); Boolean clientTelemetryEnabled = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor() .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); assert(clientTelemetryEnabled != null); activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.incrementAndGet(); this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ? 
String.format("%05d",this.clientId): clientCorrelationId; clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withClientMap(clientMap); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig; this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig); this.sessionRetryOptions = sessionRetryOptions; logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = 
AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); this.diagnosticsClientConfig.withMachineId(tempMachineId); this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig); this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions); this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new 
SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = new ConcurrentHashMap<>(); this.apiType = apiType; this.clientTelemetryConfig = clientTelemetryConfig; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig)); this.mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } private void updateGatewayProxy() { (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); (this.gatewayProxy).setCollectionCache(this.collectionCache); (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = 
createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); clientTelemetry = new ClientTelemetry( this, null, randomUuid().toString(), ManagementFactory.getRuntimeMXBean().getName(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.configs, this.clientTelemetryConfig, this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } this.retryPolicy.setRxCollectionCache(this.collectionCache); ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null ? 
consistencyLevel : this.getDefaultConsistencyLevelOfAccount(); boolean updatedDisableSessionCapturing = (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry, this.globalEndpointManager); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, 
sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString()); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations, this.sessionRetryOptions); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public String getClientCorrelationId() { return this.clientCorrelationId; } @Override public String getMachineId() { if (this.diagnosticsClientConfig == null) { return null; } return this.diagnosticsClientConfig.getMachineId(); } @Override public String getUserAgent() { return this.userAgentContainer.getUserAgent(); } @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return 
mostRecentlyCreatedDiagnostics.get(); } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> toResourceResponse(response, 
Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T> 
Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum) { return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this); } private <T> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum, DiagnosticsClientContext innerDiagnosticsFactory) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions(); UUID correlationActivityIdOfRequestOptions = qryOptAccessor .getCorrelationActivityId(nonNullQueryOptions); UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ? correlationActivityIdOfRequestOptions : randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions)); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); state.registerDiagnosticsFactory( diagnosticsFactory::reset, diagnosticsFactory::merge); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal( diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout), invalidPartitionExceptionRetryPolicy ).flatMap(result -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); return Mono.just(result); }) .onErrorMap(throwable -> { 
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot())); } private <T> Flux<FeedResponse<T>> createQueryInternal( DiagnosticsClientContext diagnosticsClientContext, String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId, final AtomicBoolean isQueryCancelledOnTimeout) { Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) { queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); RequestOptions requestOptions = options == null? 
null : ImplementationBridgeHelpers
                .CosmosQueryRequestOptionsHelper
                .getCosmosQueryRequestOptionsAccessor()
                .toRequestOptions(options);

            CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
                getEndToEndOperationLatencyPolicyConfig(requestOptions);

            // Only wrap the response flux with a client-side timeout when an
            // end-to-end latency policy is both configured and enabled.
            if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
                return getFeedResponseFluxWithTimeout(
                    feedResponseFlux,
                    endToEndPolicyConfig,
                    options,
                    isQueryCancelledOnTimeout,
                    diagnosticsClientContext);
            }

            return feedResponseFlux;
        }, Queues.SMALL_BUFFER_SIZE, 1);
    }

    /**
     * Attaches diagnostics to a query exception (typically the cancellation raised by the
     * end-to-end operation timeout).
     *
     * <p>If the client context already created a diagnostics instance for this operation it is
     * reused directly; otherwise the diagnostics of all requests cancelled for this operation
     * are merged into a single aggregate and attached to the exception.
     *
     * @param requestOptions the query request options tracking cancelled-request diagnostics
     * @param exception the exception to enrich with diagnostics (mutated in place)
     * @param diagnosticsClientContext the per-operation diagnostics context
     */
    private static void applyExceptionToMergedDiagnosticsForQuery(
        CosmosQueryRequestOptions requestOptions,
        CosmosException exception,
        DiagnosticsClientContext diagnosticsClientContext) {

        CosmosDiagnostics mostRecentlyCreatedDiagnostics =
            diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

        if (mostRecentlyCreatedDiagnostics != null) {
            // A diagnostics instance already exists for this operation - reuse it as-is.
            BridgeInternal.setCosmosDiagnostics(
                exception,
                mostRecentlyCreatedDiagnostics);
        } else {
            // Fallback: fold the diagnostics of every cancelled request into one aggregate.
            List<CosmosDiagnostics> cancelledRequestDiagnostics =
                qryOptAccessor
                    .getCancelledRequestDiagnosticsTracker(requestOptions);

            // Reduce is safe to call get() on below because the list is non-empty here.
            if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
                CosmosDiagnostics aggregatedCosmosDiagnostics =
                    cancelledRequestDiagnostics
                        .stream()
                        .reduce((first, toBeMerged) -> {
                            ClientSideRequestStatistics clientSideRequestStatistics =
                                ImplementationBridgeHelpers
                                    .CosmosDiagnosticsHelper
                                    .getCosmosDiagnosticsAccessor()
                                    .getClientSideRequestStatisticsRaw(first);

                            // BUGFIX: this previously extracted the statistics from 'first'
                            // again (instead of 'toBeMerged'), which made the merge below a
                            // self-merge and silently dropped the diagnostics of every
                            // cancelled request except the first one.
                            ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                                ImplementationBridgeHelpers
                                    .CosmosDiagnosticsHelper
                                    .getCosmosDiagnosticsAccessor()
                                    .getClientSideRequestStatisticsRaw(toBeMerged);

                            if (clientSideRequestStatistics == null) {
                                return toBeMerged;
                            } else {
                                clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                                return first;
                            }
                        })
                        .get();

                BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
            }
        }
    }

    private static <T>
Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
        Flux<FeedResponse<T>> feedResponseFlux,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        CosmosQueryRequestOptions requestOptions,
        final AtomicBoolean isQueryCancelledOnTimeout,
        DiagnosticsClientContext diagnosticsClientContext) {

        // Wraps a query's feed-response flux with the configured end-to-end operation
        // timeout. On timeout, Reactor's TimeoutException is mapped to a CosmosException
        // carrying the merged diagnostics of the cancelled requests, and the shared
        // isQueryCancelledOnTimeout flag is raised so downstream stages can observe it.
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();

        if (endToEndTimeout.isNegative()) {
            // Negative timeout fires immediately; surfaced as a dedicated
            // negative-timeout exception rather than a generic cancellation.
            return feedResponseFlux
                .timeout(endToEndTimeout)
                .onErrorMap(throwable -> {
                    if (throwable instanceof TimeoutException) {
                        CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                        // Keep the original stack trace so the failure site stays visible.
                        cancellationException.setStackTrace(throwable.getStackTrace());
                        isQueryCancelledOnTimeout.set(true);
                        applyExceptionToMergedDiagnosticsForQuery(
                            requestOptions, cancellationException, diagnosticsClientContext);
                        return cancellationException;
                    }
                    return throwable;
                });
        }

        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException exception = new OperationCancelledException();
                    exception.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext);
                    return exception;
                }
                return throwable;
            });
    }

    @Override
    public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
        // Convenience overload: wraps the raw query text in a SqlQuerySpec.
        return queryDatabases(new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
    }

    @Override
    public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                       DocumentCollection collection, RequestOptions options) {
        // Session-token reset retry policy guards against stale tokens after collection recreation.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return
ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response 
-> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. 
due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) {
        // Session-token reset retry policy guards against stale session tokens after a collection recreate.
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Validates arguments, builds the Delete request for a collection and issues it through this.delete.
    private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                                RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }

            logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);

            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));

        } catch (Exception e) {
            // Surface synchronous failures through the reactive pipeline instead of throwing.
            logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Low-level DELETE: populate headers, stamp the retry-window end time, dispatch to the store proxy.
    private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeadersAsync(request, RequestVerb.DELETE)
            .flatMap(requestPopulated -> {
                // Record the end of the retry window once at least one retry happened.
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }

    // Partition-key scoped bulk delete is issued as a POST rather than a DELETE.
    private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }

    // Low-level point read (GET) through the store proxy.
    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    // Read-feed (GET) without retry-context bookkeeping.
    Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
    }

    // Low-level query (POST); captures the response's session token for session consistency.
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated ->
                this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                    .map(response -> {
                        this.captureSessionToken(requestPopulated, response);
                        return response;
                    }
                ));
    }

    @Override
    public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance =
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, QueryFeedOperationState state) { return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, 
QueryFeedOperationState state) {
        return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
    }

    /**
     * Serializes stored-procedure arguments into a single JSON array literal,
     * e.g. {@code [1,"a",{"k":"v"}]}.
     *
     * @param objectArray the procedure arguments; JsonSerializable values use their own
     *                    serialization, everything else goes through the shared mapper
     * @return the JSON array string
     * @throws IllegalArgumentException when an argument cannot be serialized to JSON
     */
    private static String serializeProcedureParams(List<Object> objectArray) {
        String[] stringArray = new String[objectArray.size()];

        for (int i = 0; i < objectArray.size(); ++i) {
            Object object = objectArray.get(i);
            if (object instanceof JsonSerializable) {
                stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object);
            } else {
                // Arbitrary POJOs are serialized with the shared object mapper.
                try {
                    stringArray[i] = mapper.writeValueAsString(object);
                } catch (IOException e) {
                    throw new IllegalArgumentException("Can't serialize the object into the json string", e);
                }
            }
        }

        return String.format("[%s]", StringUtils.join(stringArray, ","));
    }

    /**
     * Validates that a resource id contains none of the characters that are illegal in a
     * Cosmos DB resource id ('/', '\\', '?', '#') and does not end with a space.
     * An empty/absent id is accepted (the service will generate one where applicable).
     *
     * @throws IllegalArgumentException when the id is invalid
     */
    private static void validateResource(Resource resource) {
        if (!StringUtils.isEmpty(resource.getId())) {
            // BUGFIX: the previous version's condition was truncated/garbled
            // ("indexOf(' throw ...") and did not compile; restored the full
            // illegal-character check including '#'.
            if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
                    resource.getId().indexOf('?') != -1 || resource.getId().indexOf('#') != -1) {
                throw new IllegalArgumentException("Id contains illegal chars.");
            }
            if (resource.getId().endsWith(" ")) {
                throw new IllegalArgumentException("Id ends with a space.");
            }
        }
    }

    /**
     * Builds the HTTP request headers for an operation, merging client-wide defaults
     * (tentative writes, consistency level) with the per-request {@link RequestOptions}.
     */
    private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
        Map<String, String> headers = new HashMap<>();

        if (this.useMultipleWriteLocations) {
            headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
        }

        if (consistencyLevel != null) {
            headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
        }

        if (options == null) {
            // No per-request options: only the client-wide content-response preference applies.
            if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
                headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
            }
            return headers;
        }

        // Custom headers from the options take effect first; specific options below may override.
        Map<String, String> customOptions = options.getHeaders();
        if (customOptions != null) {
            headers.putAll(customOptions);
        }

        boolean
contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if (options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if 
(options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null) { if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } if 
(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE, String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed())); } } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = 
BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (objectDoc instanceof ObjectNode) { internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc); } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, 
RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType, DiagnosticsClientContext clientContextOverride) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); String trackingId = null; if (options != null) { trackingId = options.getTrackingId(); } ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), operationType, ResourceType.Document, path, requestHeaders, options, content); if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if( options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content); if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext 
serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } if (options != null) { request.requestContext.setExcludeRegions(options.getExcludeRegions()); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, 
Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError())); request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size()); return request; } /** * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers * @param request request to populate headers to * @param httpMethod http method * @return Mono, which on subscription will populate the headers in the request passed in the argument. */ private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if (this.apiType != null) { request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString()); } this.populateCapabilitiesHeader(request); if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if 
(RequestVerb.PATCH.equals(httpMethod)
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        // PATCH uses the JSON-patch media type rather than plain JSON.
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }

    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }

    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

    if (this.requiresFeedRangeFiltering(request)) {
        // Feed-range scoped reads need extra routing headers derived from the
        // partition key range cache before authorization is applied.
        return request.getFeedRange()
                      .populateFeedRangeFilteringHeaders(
                          this.getPartitionKeyRangeCache(),
                          request,
                          this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
                      .flatMap(this::populateAuthorizationHeader);
    }

    return this.populateAuthorizationHeader(request);
}

/** Advertises the SDK's supported capabilities unless the caller already set the header. */
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        request
            .getHeaders()
            .put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}

/**
 * True only for document/conflict feed or query operations that carry a feed
 * range — the only requests that need feed-range filtering headers.
 */
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    if (request.getResourceType() != ResourceType.Document &&
            request.getResourceType() != ResourceType.Conflict) {
        return false;
    }

    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            return request.getFeedRange() != null;
        default:
            return false;
    }
}

/**
 * Adds the AUTHORIZATION header for AAD-token auth; a no-op pass-through for
 * every other token type (key/resource-token auth is applied earlier, in
 * populateHeadersAsync).
 *
 * @param request request to decorate; must not be null
 * @return Mono emitting the same request once the header is set
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }

    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
            .map(authorization -> {
                request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                return
request;
            });
    } else {
        return Mono.just(request);
    }
}

/**
 * HttpHeaders-based overload: sets the AUTHORIZATION header for AAD-token auth,
 * otherwise returns the headers unchanged.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }

    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
            .map(authorization -> {
                httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                return httpHeaders;
            });
    }

    return Mono.just(httpHeaders);
}

/** Returns the auth mode this client was configured with (key, resource token, AAD, ...). */
@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}

/**
 * Produces the authorization token for a request, trying in order: a custom
 * token resolver, a key credential, a single resource token, and finally the
 * per-resource token map.
 *
 * NOTE(review): the {@code tokenType} parameter is not consulted on any branch
 * below — confirm whether that is intentional.
 *
 * @param resourceName address of the target resource
 * @param resourceType resource type used for signing / token lookup
 * @param requestVerb  HTTP verb used for signing
 * @param headers      request headers (date header participates in the signature)
 * @param tokenType    requested token type (currently unused here)
 * @param properties   caller-supplied properties passed to a custom resolver
 * @return the token string to place in the AUTHORIZATION header
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {

    if (this.cosmosAuthorizationTokenResolver != null) {
        // Delegate entirely to the user-supplied resolver; hand it an
        // immutable view of the properties.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName,
            this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ?
Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        // Key-based auth: compute an HMAC signature over verb/resource/headers.
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token was supplied; it is used verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            // Account-level reads fall back to the first token from the permission feed.
            return this.firstResourceTokenFromPermissionFeed;
        }

        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}

/**
 * Maps the service resource type to the public CosmosResourceType, defaulting
 * to SYSTEM for types with no public counterpart.
 */
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType cosmosResourceType =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    if (cosmosResourceType == null) {
        return CosmosResourceType.SYSTEM;
    }
    return cosmosResourceType;
}

/** Records the session token from a response so session consistency can be honored on later requests. */
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}

/**
 * Executes a create (POST) against the store after populating headers.
 * Retry timing is refreshed when this attempt is itself a retry.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);

            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}

/**
 * Executes an upsert (POST + IS_UPSERT header) against the store after
 * populating headers, capturing the response's session token.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {

    return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
            Map<String, String> headers = requestPopulated.getHeaders();
            // headers will never be null, since it has been initialized in constructor
            assert (headers != null);
            // The IS_UPSERT header is what turns this POST into an upsert server-side.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                .map(response -> {
                        // Track the session token for session consistency.
                        this.captureSessionToken(requestPopulated, response);
                        return response;
                    }
                );
        });
}

/** Executes a replace (PUT) against the store after populating headers. */
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(requestPopulated -> {

            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}

/** Executes a partial-document patch (PATCH) against the store after populating headers. */
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {

            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}

/**
 * Public create-document entry point. Wraps the core create with the
 * availability strategy (cross-region hedging); non-idempotent write retries
 * are enabled only when the caller opted in via request options.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null &&
options.getNonIdempotentWriteRetriesEnabled()
    );
}

/**
 * Core create path: builds retry policy + scoped diagnostics, adds a
 * partition-key-mismatch retry when no PK was supplied (the cached collection
 * metadata may be stale), and applies the end-to-end timeout policy.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Scoped factory collects diagnostics across all retries of this operation.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);

    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    if (nonNullRequestOptions.getPartitionKey() == null) {
        requestRetryPolicy =
            new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }

    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}

/**
 * Single create attempt: builds the service request and maps the raw response
 * to a ResourceResponse. Synchronous failures are converted to an error Mono.
 */
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);

        return requestObs
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Wraps a point-operation Mono with the end-to-end latency policy: a negative
 * timeout fails immediately; otherwise a reactor timeout is applied and
 * timeouts are mapped to OperationCancelledException with diagnostics attached.
 */
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);

    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {

        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();

        if (endToEndTimeout.isNegative()) {
            // Ensure at least one diagnostics instance exists before failing.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }

        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }
    return rxDocumentServiceResponseMono;
}

/**
 * Maps a reactor TimeoutException to OperationCancelledException (running the
 * mark-E2E-timeout hook and attaching the latest diagnostics); all other
 * throwables pass through unchanged.
 */
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {
    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if
(unwrappedException instanceof TimeoutException) {
        CosmosException exception = new OperationCancelledException();
        exception.setStackTrace(throwable.getStackTrace());

        // Let the in-flight request know it was cancelled due to the E2E timeout.
        Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
        if (actualCallback != null) {
            logger.trace("Calling actual Mark E2E timeout callback");
            actualCallback.run();
        }

        // Guarantee a diagnostics instance exists, then attach it to the exception.
        CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
        if (lastDiagnosticsSnapshot == null) {
            scopedDiagnosticsFactory.createDiagnostics();
        }
        BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
        return exception;
    }
    return throwable;
}

/**
 * Builds the OperationCancelledException used when the caller configured a
 * negative end-to-end timeout (sub-status NEGATIVE_TIMEOUT_PROVIDED).
 *
 * @param cosmosDiagnostics diagnostics to attach, may be null
 * @param negativeTimeout   the invalid timeout; must be negative
 */
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");
    String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
    CosmosException exception = new OperationCancelledException(message, null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
    }

    return exception;
}

/**
 * Public upsert-document entry point; wraps the core upsert with the
 * availability strategy, mirroring createDocument.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions
options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Core upsert path: mirrors createDocumentCore — scoped diagnostics, session
    // retry policy, PK-mismatch retry when no partition key was supplied, and the
    // end-to-end timeout wrapper.
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}

/**
 * Single upsert attempt: builds the service request (OperationType.Upsert) and
 * maps the raw response to a ResourceResponse.
 */
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(
            retryPolicyInstance,
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            OperationType.Upsert,
            clientContextOverride);

        return reqObs
            .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Public replace-document entry point (by document link); wraps the core
 * replace with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink,
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
    // Core replace path: same scaffolding as create/upsert. When no partition key
    // is supplied, the collection link is derived from the document link for the
    // PK-mismatch retry policy.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                documentLink,
                document,
                nonNullRequestOptions,
                finalRequestRetryPolicy,
                endToEndPolicyConfig,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}

/**
 * Validates arguments, converts the raw object into a typed Document, and
 * delegates to the Document-typed internal replace.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Document typedDocument = documentFromObject(document, mapper);

        return this.replaceDocumentInternal(
            documentLink,
            typedDocument,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

/**
 * Public replace-document entry point (by Document instance, using its self
 * link); wraps the core replace with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            document,
            opt,
            e2ecfg,
clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

/**
 * Core replace path for a Document instance. Unlike the link-based overload,
 * this does not use a ScopedDiagnosticsFactory or the E2E timeout wrapper —
 * NOTE(review): confirm that asymmetry is intentional.
 */
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, options);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;

    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}

/** Validates the document and delegates to the link-based internal replace via its self link. */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        logger.debug("Failure in replacing a database due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

/**
 * Builds and issues the actual replace request: serializes the document
 * (recording serialization diagnostics), applies tracking id / write-retry /
 * E2E-cancellation options, resolves the collection, and adds partition key
 * information before sending the PUT.
 */
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
    Instant serializationStartTimeUTC = Instant.now();

    if (options != null) {
        String trackingId = options.getTrackingId();

        if (trackingId != null && !trackingId.isEmpty()) {
            // Tracking id is embedded in the document body so retried writes can
            // be recognized server-side.
            document.set(Constants.Properties.TRACKING_ID, trackingId);
        }
    }

    ByteBuffer content = serializeJsonToByteBuffer(document);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook lets the E2E-timeout mapper mark this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);

    return requestObs
        // NOTE(review): the lambda ignores 'req' and passes the outer 'request';
        // addPartitionKeyInformation appears to mutate and return the same
        // request object, so these should be identical — confirm.
        .flatMap(req -> replace(request, retryPolicyInstance)
        .map(resp -> toResourceResponse(resp, Document.class)));
}

/** Resolves the effective E2E latency policy for a request (request options win over the client default). */
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(
        options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null);
}

/** Falls back to the client-level E2E latency policy when none was supplied. */
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    return policyConfig != null ? policyConfig : this.cosmosEndToEndOperationLatencyPolicyConfig;
}

/**
 * Public patch-document entry point; wraps the core patch with the
 * availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
            documentLink,
            cosmosPatchOperations,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
    // Core patch path: scoped diagnostics + session retry policy + E2E timeout.
    // Note: no PartitionKeyMismatchRetryPolicy here, unlike create/upsert/replace.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                nonNullRequestOptions,
                documentClientRetryPolicy,
                scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}

/**
 * Builds and issues the PATCH request: serializes the patch operations
 * (recording serialization diagnostics), applies write-retry / E2E-cancellation
 * options, resolves the collection, and adds partition key information.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);

    final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    Instant serializationStartTimeUTC = Instant.now();

    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    // The patch body carries no document, so pass nulls for content/document here.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request, null, null, options, collectionObs);

    return requestObs
        // NOTE(review): lambda ignores 'req' and passes the outer 'request' —
        // same pattern as replaceDocumentInternal; confirm they alias.
        .flatMap(req -> patch(request,
retryPolicyInstance))
        .map(resp -> toResourceResponse(resp, Document.class));
}

/**
 * Public delete-document entry point (no document body); wraps the core delete
 * with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink,
            null,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

/**
 * Delete overload that also carries the known item snapshot
 * (InternalObjectNode) so partition key information can be derived from it.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink,
            internalObjectNode,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
    // Core delete path: scoped diagnostics + session retry policy + E2E timeout.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink,
                internalObjectNode,
                nonNullRequestOptions,
                requestRetryPolicy,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}

/**
 * Builds and issues the DELETE request: applies write-retry /
 * E2E-cancellation options, resolves the collection, and derives partition key
 * information (from the optional item snapshot) before sending.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        // internalObjectNode (when present) supplies the partition key value.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);

        return requestObs
            .flatMap(req -> this
                .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

/**
 * Deletes every document under one partition key via the server-side
 * PartitionKey-delete operation. No availability-strategy wrapping here.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}

/**
 * Single attempt of the partition-key delete: builds a Delete request against
 * the ResourceType.PartitionKey resource and adds partition key information.
 * NOTE(review): the PartitionKey argument of the public overload is not passed
 * in here — the value must come from {@code options}; confirm.
 */
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

/** Public read-document entry point; delegates with this client as the diagnostics factory. */
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return readDocument(documentLink, options, this);
}

/** Read with an explicit diagnostics factory; wraps the core read with the availability strategy. */
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}

private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();

        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

        // Enforce the end-to-end latency policy (if configured) on top of the retried read.
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> readDocumentInternal(
                    documentLink, nonNullRequestOptions, retryPolicyInstance, scopedDiagnosticsFactory),
                retryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }

    /**
     * Builds and executes the point-read request for a single document.
     *
     * @param documentLink          link of the document to read; must be non-empty.
     * @param options               request options; callers above pass a non-null instance.
     * @param retryPolicyInstance   retry policy already bound to the scoped diagnostics factory.
     * @param clientContextOverride diagnostics context used to create the service request.
     * @return a Mono emitting the typed read response, or an error Mono on synchronous failure.
     */
    private Mono<ResourceResponse<Document>> readDocumentInternal(
        String documentLink,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {

        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }

            logger.debug("Reading a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Read, ResourceType.Document, path, requestHeaders, options);

            // Let the E2E-timeout machinery flag this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
                this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

            // NOTE(review): the lambda ignores 'req' and reads the outer 'request'; this is only
            // correct if addPartitionKeyInformation mutates and returns the same instance -- verify.
            return requestObs.flatMap(req -> this.read(request,
retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in reading a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    /**
     * Reads all documents in a collection by issuing a SELECT * query.
     */
    @Override
    public <T> Flux<FeedResponse<T>> readDocuments(
        String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
    }

    /**
     * Reads many documents identified by (id, partition key) pairs in one logical operation:
     * identities are grouped by the physical partition that owns them, single-item groups are
     * served with point reads, multi-item groups with a generated query, and all pages are
     * aggregated into a single FeedResponse (results, metrics, charge, diagnostics).
     */
    @Override
    public <T> Mono<FeedResponse<T>> readMany(
        List<CosmosItemIdentity> itemIdentityList,
        String collectionLink,
        QueryFeedOperationState state,
        Class<T> klass) {

        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx)
        );

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
            OperationType.Query,
            ResourceType.Document,
            collectionLink, null
        );

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request);

        return collectionObs
            .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    return Mono.error(new IllegalStateException("Collection cannot be null"));
                }

                final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();

                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(), null, null);
                return valueHolderMono
                    .flatMap(collectionRoutingMapValueHolder -> {
                        // Bucket each item identity by the partition key range that owns it.
                        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                        CollectionRoutingMap routingMap =
collectionRoutingMapValueHolder.v;
                        if (routingMap == null) {
                            return Mono.error(new IllegalStateException("Failed to get routing map."));
                        }

                        itemIdentityList
                            .forEach(itemIdentity -> {
                                // For sub-partitioned (MULTI_HASH) containers a full partition key
                                // (one component per path) is required to route the lookup.
                                if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH)
                                    && ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                           .getComponents().size() != pkDefinition.getPaths().size()) {
                                    throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                                }

                                String effectivePartitionKeyString = PartitionKeyInternalHelper
                                    .getEffectivePartitionKeyString(
                                        BridgeInternal.getPartitionKeyInternal(
                                            itemIdentity.getPartitionKey()),
                                        pkDefinition);

                                PartitionKeyRange range =
                                    routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                                if (partitionRangeItemKeyMap.get(range) == null) {
                                    List<CosmosItemIdentity> list = new ArrayList<>();
                                    list.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, list);
                                } else {
                                    List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range);
                                    pairs.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, pairs);
                                }
                            });

                        // Ranges with more than one identity get a generated query; single-item
                        // ranges are served by point reads below.
                        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap =
                            getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());

                        Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                            diagnosticsFactory,
                            partitionRangeItemKeyMap,
                            resourceLink,
                            state.getQueryOptions(),
                            klass);

                        Flux<FeedResponse<Document>> queries = queryForReadMany(
                            diagnosticsFactory,
                            resourceLink,
                            new SqlQuerySpec(DUMMY_SQL_QUERY),
                            state.getQueryOptions(),
                            Document.class,
                            ResourceType.Document,
                            collection,
                            Collections.unmodifiableMap(rangeQueryMap));

                        // Merge point reads and queries, then fold all pages into one response.
                        return Flux.merge(pointReads, queries)
                            .collectList()
                            .map(feedList -> {
                                List<T> finalList = new ArrayList<>();
                                HashMap<String, String> headers = new HashMap<>();
                                ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                                Collection<ClientSideRequestStatistics> aggregateRequestStatistics =
                                    new DistinctClientSideRequestStatisticsCollection();
                                double requestCharge = 0;
                                for
(FeedResponse<Document> page : feedList) {
                                    ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                        ModelBridgeInternal.queryMetrics(page);
                                    if (pageQueryMetrics != null) {
                                        pageQueryMetrics.forEach(
                                            aggregatedQueryMetrics::putIfAbsent);
                                    }

                                    requestCharge += page.getRequestCharge();
                                    // Deserialize each raw Document into the caller's target type.
                                    finalList.addAll(page.getResults().stream().map(document ->
                                        ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList()));
                                    aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                                }

                                CosmosDiagnostics aggregatedDiagnostics =
                                    BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                    aggregatedDiagnostics, aggregateRequestStatistics);

                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    // Record the aggregated readMany as one 200 with the summed charge.
                                    ctxAccessor.recordOperation(
                                        ctx, 200, 0, finalList.size(), requestCharge, aggregatedDiagnostics, null
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            aggregatedDiagnostics,
                                            ctx);
                                }

                                headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                    .toString(requestCharge));
                                FeedResponse<T> frp = BridgeInternal
                                    .createFeedResponseWithQueryMetrics(
                                        finalList,
                                        headers,
                                        aggregatedQueryMetrics,
                                        null,
                                        false,
                                        false,
                                        aggregatedDiagnostics);
                                return frp;
                            });
                    })
                    .onErrorMap(throwable -> {
                        // On failure, still merge diagnostics and record the failed operation
                        // against the diagnostics context before re-throwing.
                        if (throwable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException)throwable;
                            CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                            if (diagnostics != null) {
                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        cosmosException.getStatusCode(),
                                        cosmosException.getSubStatusCode(),
                                        0,
                                        cosmosException.getRequestCharge(),
                                        diagnostics,
                                        throwable
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            diagnostics,
                                            state.getDiagnosticsContextSnapshot());
                                }
                            }
                            return cosmosException;
                        }

                        return throwable;
});
            }
        );
    }

    /**
     * Builds, per partition key range, the SQL query used to fetch the identities mapped to that
     * range. Single-identity ranges are omitted (they are served by point reads instead).
     */
    private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
        PartitionKeyDefinition partitionKeyDefinition) {

        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
        String partitionKeySelector = createPkSelector(partitionKeyDefinition);

        for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {

            SqlQuerySpec sqlQuerySpec;
            List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue();
            if (cosmosItemIdentityList.size() > 1) {
                if (partitionKeySelector.equals("[\"id\"]")) {
                    // Partition key IS the id: a plain "id IN (...)" query suffices.
                    sqlQuerySpec =
                        createReadManyQuerySpecPartitionKeyIdSame(cosmosItemIdentityList, partitionKeySelector);
                } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
                    sqlQuerySpec = createReadManyQuerySpecMultiHash(entry.getValue(), partitionKeyDefinition);
                } else {
                    sqlQuerySpec = createReadManyQuerySpec(cosmosItemIdentityList, partitionKeySelector);
                }

                rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
            }
        }

        return rangeQueryMap;
    }

    // Generates "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for containers whose partition
    // key path is /id (id and partition key carry the same value).
    private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
        List<CosmosItemIdentity> idPartitionKeyPairList,
        String partitionKeySelector) {

        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
        for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
            CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + i;

            parameters.add(new SqlParameter(idParamName, idValue));
            queryStringBuilder.append(idParamName);

            if (i < idPartitionKeyPairList.size() - 1) {
                queryStringBuilder.append(", ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    // Generates "SELECT * FROM c WHERE ( (c.id = @p AND c<pkSelector> = @p) OR ... )" pairing
    // each id with its single-path partition key value.
    private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
        StringBuilder
queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE ( ");
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);

            PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
            Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
            // Two parameters per identity: even index for the pk value, odd index for the id.
            String pkParamName = "@param" + (2 * i);
            parameters.add(new SqlParameter(pkParamName, pkValue));

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + (2 * i + 1);
            parameters.add(new SqlParameter(idParamName, idValue));

            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c");
            queryStringBuilder.append(partitionKeySelector);
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParamName);
            queryStringBuilder.append(" )");

            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    // Variant of createReadManyQuerySpec for sub-partitioned (MULTI_HASH) containers: each
    // identity contributes one parameter per partition key path plus one for the id.
    private SqlQuerySpec createReadManyQuerySpecMultiHash(
        List<CosmosItemIdentity> itemIdentities,
        PartitionKeyDefinition partitionKeyDefinition) {

        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE ( ");

        int paramCount = 0;
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);

            PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
            Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);

            // NOTE(review): the composite partition key value is assumed to arrive as a single
            // "="-separated string with one segment per path -- verify against the caller.
            String pkValueString = (String) pkValue;
            List<List<String>> partitionKeyParams = new ArrayList<>();
            List<String> paths = partitionKeyDefinition.getPaths();
            int pathCount = 0;
            for (String subPartitionKey: pkValueString.split("=")) {
                String pkParamName =
"@param" + paramCount;
                partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
                parameters.add(new SqlParameter(pkParamName, subPartitionKey));
                paramCount++;
                pathCount++;
            }

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + paramCount;
            paramCount++;
            parameters.add(new SqlParameter(idParamName, idValue));

            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);
            for (List<String> pkParam: partitionKeyParams) {
                queryStringBuilder.append(" AND ");
                queryStringBuilder.append(" c.");
                // Drop the leading '/' of the partition key path.
                queryStringBuilder.append(pkParam.get(0).substring(1));
                queryStringBuilder.append((" = "));
                queryStringBuilder.append(pkParam.get(1));
            }
            queryStringBuilder.append(" )");

            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    // Turns the partition key definition's paths into a property selector such as ["a"]["b"].
    // NOTE(review): embedded double quotes are replaced with a lone backslash rather than an
    // escaped quote -- looks suspicious; verify the intended escaping before touching it.
    private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            .map(pathPart -> StringUtils.substring(pathPart, 1))
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.joining());
    }

    /**
     * Executes the per-partition queries produced for a readMany call and applies the
     * end-to-end latency policy (timeout) when one is enabled. Returns an empty Flux when
     * there is nothing to query.
     */
    private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DocumentCollection collection,
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

        if (rangeQueryMap.isEmpty()) {
            return Flux.empty();
        }

        UUID activityId = randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

        Flux<?
extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
                diagnosticsFactory,
                queryClient,
                collection.getResourceId(),
                sqlQuery,
                rangeQueryMap,
                options,
                collection.getResourceId(),
                parentResourceLink,
                activityId,
                klass,
                resourceTypeEnum,
                isQueryCancelledOnTimeout);

        Flux<FeedResponse<T>> feedResponseFlux =
            executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
                                                                    .CosmosQueryRequestOptionsHelper
                                                                    .getCosmosQueryRequestOptionsAccessor()
                                                                    .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);

        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            // Bound the whole fan-out query with the configured end-to-end timeout.
            return getFeedResponseFluxWithTimeout(
                feedResponseFlux,
                endToEndPolicyConfig,
                options,
                isQueryCancelledOnTimeout,
                diagnosticsFactory);
        }

        return feedResponseFlux;
    }

    /**
     * Serves the single-item buckets of a readMany call with point reads. A plain 404 (sub-status
     * UNKNOWN) is converted into an empty page instead of an error so that missing ids simply do
     * not appear in the aggregated result.
     */
    private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
        String resourceLink,
        CosmosQueryRequestOptions queryRequestOptions,
        Class<T> klass) {

        return Flux.fromIterable(singleItemPartitionRequestMap.values())
            .flatMap(cosmosItemIdentityList -> {
                if (cosmosItemIdentityList.size() == 1) {
                    CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                    RequestOptions requestOptions = ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .toRequestOptions(queryRequestOptions);
                    requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                    return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                        .flatMap(resourceResponse -> Mono.just(
                            new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                        ))
                        .onErrorResume(throwable -> {
                            Throwable
unwrappedThrowable = Exceptions.unwrap(throwable);
                            if (unwrappedThrowable instanceof CosmosException) {
                                CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                                int statusCode = cosmosException.getStatusCode();
                                int subStatusCode = cosmosException.getSubStatusCode();
                                // Plain not-found is expected for readMany: surface it as a
                                // (resourceResponse=null, exception) pair instead of an error.
                                if (statusCode == HttpConstants.StatusCodes.NOTFOUND
                                    && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                    return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                                }
                            }
                            return Mono.error(unwrappedThrowable);
                        });
                }
                return Mono.empty();
            })
            .flatMap(resourceResponseToExceptionPair -> {
                ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
                CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
                FeedResponse<Document> feedResponse;

                if (cosmosException != null) {
                    // Not-found: emit an empty page but keep the request statistics for diagnostics.
                    feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
                } else {
                    CosmosItemResponse<T> cosmosItemResponse =
                        ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                    feedResponse = ModelBridgeInternal.createFeedResponse(
                        Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                        cosmosItemResponse.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
                }

                return Mono.just(feedResponse);
            });
    }

    /**
     * Queries documents in a collection with a raw SQL string.
     */
    @Override
    public <T> Flux<FeedResponse<T>> queryDocuments(
        String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {

        return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
    }

    // Adapts this client to the IDocumentQueryClient interface used by the query pipeline; the
    // optional operation context/listener tuple hooks request/response/exception callbacks in.
    private IDocumentQueryClient
documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {

        return new IDocumentQueryClient () {

            @Override
            public RxCollectionCache getCollectionCache() {
                return RxDocumentClientImpl.this.collectionCache;
            }

            @Override
            public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
                return RxDocumentClientImpl.this.partitionKeyRangeCache;
            }

            @Override
            public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
                return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
            }

            @Override
            public ConsistencyLevel getDefaultConsistencyLevelAsync() {
                return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
            }

            @Override
            public ConsistencyLevel getDesiredConsistencyLevelAsync() {
                // Consistency level the client was configured with.
                return RxDocumentClientImpl.this.consistencyLevel;
            }

            @Override
            public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
                if (operationContextAndListenerTuple == null) {
                    return RxDocumentClientImpl.this.query(request).single();
                } else {
                    // Notify the registered listener around request, response and error.
                    final OperationListener listener =
                        operationContextAndListenerTuple.getOperationListener();
                    final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                    request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                    listener.requestListener(operationContext, request);

                    return RxDocumentClientImpl.this.query(request).single().doOnNext(
                        response -> listener.responseListener(operationContext, response)
                    ).doOnError(
                        ex -> listener.exceptionListener(operationContext, ex)
                    );
                }
            }

            @Override
            public QueryCompatibilityMode getQueryCompatibilityMode() {
                return QueryCompatibilityMode.Default;
            }

            @Override
            public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
                ResourceType resourceType,
                OperationType operationType,
                Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
                RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {

                // Delegate straight to the outer client's availability-strategy implementation.
                return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                    resourceType, operationType, retryPolicyFactory, req, feedOperation
                );
            }

            @Override
            public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
                // Not supported by this adapter.
                return null;
            }
        };
    }

    /**
     * Queries documents in a collection with a parameterized query spec.
     */
    @Override
    public <T> Flux<FeedResponse<T>> queryDocuments(
        String collectionLink,
        SqlQuerySpec querySpec,
        QueryFeedOperationState state,
        Class<T> classOfT) {

        SqlQuerySpecLogger.getInstance().logQuery(querySpec);
        return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
    }

    /**
     * Queries the change feed of a collection.
     */
    @Override
    public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
        final DocumentCollection collection,
        final CosmosChangeFeedRequestOptions changeFeedOptions,
        Class<T> classOfT) {

        checkNotNull(collection, "Argument 'collection' must not be null.");

        ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
            this, ResourceType.Document, classOfT, collection.getAltLink(), collection.getResourceId(), changeFeedOptions);

        return changeFeedQueryImpl.executeAsync();
    }

    @Override
    public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
        return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
    }

    /**
     * Reads all documents of one logical partition by running a partition-scoped scan query.
     */
    @Override
    public <T> Flux<FeedResponse<T>> readAllDocuments(
        String collectionLink,
        PartitionKey partitionKey,
        QueryFeedOperationState state,
        Class<T> classOfT) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        if (partitionKey == null) {
            throw new IllegalArgumentException("partitionKey");
        }

        // Clone the caller's query options so per-attempt mutations stay local to this call.
        final CosmosQueryRequestOptions effectiveOptions = qryOptAccessor.clone(state.getQueryOptions());

        RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();

        List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            ResourceType.Document,
            OperationType.Query,
            false,
            nonNullRequestOptions);

        ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            // No cross-region speculation: diagnostics are merged once at the end.
            state.registerDiagnosticsFactory(
                () -> {},
                (ctx) -> diagnosticsFactory.merge(ctx));
        } else {
            // With speculation the factory is reset per attempt before merging.
            state.registerDiagnosticsFactory(
                () -> diagnosticsFactory.reset(),
                (ctx) -> diagnosticsFactory.merge(ctx));
        }

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            diagnosticsFactory,
            OperationType.Query,
            ResourceType.Document,
            collectionLink,
            null
        );

        Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request).flux();

        return collectionObs.flatMap(documentCollectionResourceResponse -> {

            DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }

            PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            String pkSelector = createPkSelector(pkDefinition);
            SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);

            String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
            UUID activityId = randomUuid();

            final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

            IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));

            // Retries (after refreshing the collection cache) when the collection was recreated
            // with the same name ("invalid partition").
            InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
                this.collectionCache,
                null,
                resourceLink,
                ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

            Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
                () -> {
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                        .tryLookupAsync(
                            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                            collection.getResourceId(),
                            null,
                            null).flux();

                    return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                        CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                        if (routingMap == null) {
                            return Mono.error(new IllegalStateException("Failed to get routing map."));
                        }

                        // Route the scan to the single range owning this logical partition.
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(
                                BridgeInternal.getPartitionKeyInternal(partitionKey),
                                pkDefinition);

                        PartitionKeyRange range =
                            routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                        return createQueryInternal(
                            diagnosticsFactory,
                            resourceLink,
                            querySpec,
                            ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                            classOfT,
                            ResourceType.Document,
                            queryClient,
                            activityId,
                            isQueryCancelledOnTimeout);
                    });
                },
                invalidPartitionExceptionRetryPolicy);

            if (orderedApplicableRegionsForSpeculation.size() < 2) {
                return innerFlux;
            }

            // With speculation enabled, fold the scoped diagnostics into the request options on
            // every completion path (success, error and cancellation).
            return innerFlux
                .flatMap(result -> {
                    diagnosticsFactory.merge(nonNullRequestOptions);
                    return Mono.just(result);
                })
                .onErrorMap(throwable -> {
                    diagnosticsFactory.merge(nonNullRequestOptions);
                    return throwable;
                })
                .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
        });
    }

    @Override
    public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
        return queryPlanCache;
    }

    /**
     * Reads the partition key ranges of a collection as a feed.
     */
    @Override
    public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, QueryFeedOperationState state) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
            Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
    }

    @Override
    public Flux<FeedResponse<PartitionKeyRange>>
readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
            Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
    }

    // Validates arguments and builds the RxDocumentServiceRequest for a stored-procedure operation.
    private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                               RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }

        validateResource(storedProcedure);

        String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
        return RxDocumentServiceRequest.create(this, operationType,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
    }

    // Validates arguments and builds the RxDocumentServiceRequest for a user-defined-function operation.
    private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                                   RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }

        validateResource(udf);

        String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
        return RxDocumentServiceRequest.create(this, operationType,
            ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
    }

    /**
     * Creates a stored procedure in a collection, wrapped in the session-token-reset retry policy.
     */
    @Override
    public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                         StoredProcedure storedProcedure, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // Builds and executes the Create request for a stored procedure.
    private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                                  StoredProcedure storedProcedure, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
        try {

            logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
                collectionLink, storedProcedure.getId());
            RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
                OperationType.Create);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, StoredProcedure.class));

        } catch (Exception e) {
            // Synchronous failures (validation, request construction) surface as an error Mono.
            logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Replaces an existing stored procedure (addressed by its self link).
     */
    @Override
    public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                          RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // Builds and executes the Replace request for a stored procedure.
    private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                                   RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {

            if (storedProcedure == null) {
                throw new IllegalArgumentException("storedProcedure");
            }
            logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

            RxDocumentClientImpl.validateResource(storedProcedure);

            String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.replace(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, StoredProcedure.class));

        } catch (Exception e) {
            logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Deletes a stored procedure by its link.
     */
    @Override
    public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                         RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // Builds and executes the Delete request for a stored procedure.
    private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                                  RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {

            if (StringUtils.isEmpty(storedProcedureLink)) {
                throw new IllegalArgumentException("storedProcedureLink");
            }

            logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, StoredProcedure.class));

        } catch (Exception e) {
            logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Reads a stored procedure by its link.
     */
    @Override
    public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                       RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance),
            retryPolicyInstance);
    }

    // Builds and executes the Read request for a stored procedure.
    private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                                RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {

            if (StringUtils.isEmpty(storedProcedureLink)) {
                throw new IllegalArgumentException("storedProcedureLink");
            }

            logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);

            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.read(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, StoredProcedure.class));

        } catch (Exception e) {
            logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Reads all stored procedures in a collection as a feed.
     */
    @Override
    public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                    QueryFeedOperationState state) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class,
            Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
    }

    @Override
    public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                     String query, QueryFeedOperationState state) {
        return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                     SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
    }

    /**
     * Executes a stored procedure with the given parameters.
     */
    @Override
    public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                                RequestOptions options, List<Object> procedureParams) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams,
documentClientRetryPolicy), documentClientRetryPolicy);
    }

    @Override
    public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                         ServerBatchRequest serverBatchRequest,
                                                         RequestOptions options,
                                                         boolean disableAutomaticIdGeneration) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy,
                disableAutomaticIdGeneration),
            documentClientRetryPolicy);
    }

    // Builds and issues an ExecuteJavaScript request for the stored procedure; procedure parameters
    // are serialized into the request body (empty body when none are supplied).
    private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                         RequestOptions options,
                                                                         List<Object> procedureParams,
                                                                         DocumentClientRetryPolicy retryPolicy) {
        try {
            logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);

            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure,
                OperationType.ExecuteJavaScript);
            requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path,
                procedureParams != null && !procedureParams.isEmpty()
                    ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
                requestHeaders, options);

            if (options != null) {
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }

            if (retryPolicy != null) {
                retryPolicy.onBeforeSendRequest(request);
            }

            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            // FIX: use the request emitted by addPartitionKeyInformation instead of capturing the
            // outer variable — previously the lambda ignored its own parameter (same object today,
            // but the shadow capture would silently break if the helper ever returned a copy).
            return reqObs.flatMap(req -> create(req, retryPolicy, getOperationContextAndListenerTuple(options))
                .map(response -> {
                    this.captureSessionToken(req, response);
                    return toStoredProcedureResponse(response);
                }));
        } catch (Exception e) {
            logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Builds the batch request and parses the service response into a CosmosBatchResponse.
    private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                                  ServerBatchRequest serverBatchRequest,
                                                                  RequestOptions options,
                                                                  DocumentClientRetryPolicy requestRetryPolicy,
                                                                  boolean disableAutomaticIdGeneration) {
        try {
            logger.debug("Executing a Batch request with number of operations {}",
                serverBatchRequest.getOperations().size());

            Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink,
                serverBatchRequest, options, disableAutomaticIdGeneration);
            Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(
                request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));

            return responseObservable
                .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse,
                    serverBatchRequest, true));
        } catch (Exception ex) {
            logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
            return Mono.error(ex);
        }
    }

    @Override
    public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                         RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink,
trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy 
retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, QueryFeedOperationState state) { return queryTriggers(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction 
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions( String collectionLink, String query, QueryFeedOperationState state) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions( String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private 
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, QueryFeedOperationState state) { return queryConflicts(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { 
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. 
user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) { return queryUsers(databaseLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if 
(StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. 
databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy 
retryPolicyInstance) { try { if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys( String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys( String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> 
createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null)); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, QueryFeedOperationState state) { return queryPermissions(userLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing 
an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( QueryFeedOperationState state, ResourceType resourceType, Class<T> klass, String resourceLink) { return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy), retryPolicy); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink, DocumentClientRetryPolicy retryPolicy) { final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions(); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions); int maxPageSize = maxItemCount != null ? 
maxItemCount : -1;

        // Document feeds must go through the query pipeline; this helper only serves
        // non-document resource types.
        assert(resourceType != ResourceType.Document);

        // Builds one page request, threading the continuation token and page-size headers.
        BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
            Map<String, String> requestHeaders = new HashMap<>();
            if (continuationToken != null) {
                requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
            }
            requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
            retryPolicy.onBeforeSendRequest(request);
            return request;
        };

        // Executes one page request and converts the raw response into a typed FeedResponse page.
        Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> readFeed(request)
            .map(response -> toFeedResponsePage(
                response,
                ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .getItemFactoryMethod(nonNullOptions, klass),
                klass));

        return Paginator
            .getPaginatedQueryResultAsObservable(
                nonNullOptions, createRequestFunc, executeFunc, maxPageSize);
    }

    /** Queries offers with a raw query string. */
    @Override
    public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
        return queryOffers(new SqlQuerySpec(query), state);
    }

    /** Queries offers with the given query spec. */
    @Override
    public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
    }

    /** Reads the database account metadata. */
    @Override
    public Mono<DatabaseAccount> getDatabaseAccount() {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy);
    }

    private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Getting Database Account");
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read,
                ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null);
            return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
        } catch (Exception e) {
            logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Returns the session container backing this client. */
    public Object getSession() {
        return this.sessionContainer;
    }

    // Replaces the session container; the argument must be a SessionContainer instance,
    // otherwise the cast below throws ClassCastException.
    public void setSession(Object sessionContainer) {
        this.sessionContainer = (SessionContainer) sessionContainer;
    }

    @Override
    public RxClientCollectionCache getCollectionCache() {
        return this.collectionCache;
    }

    @Override
    public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
        return partitionKeyRangeCache;
    }

    @Override
    public GlobalEndpointManager getGlobalEndpointManager() {
        return this.globalEndpointManager;
    }

    @Override
    public AddressSelector getAddressSelector() {
        return new AddressSelector(this.addressResolver, this.configs.getProtocol());
    }

    // Reads the database account from a specific endpoint (bypassing endpoint selection) and
    // records whether multiple write locations can be used for this account.
    public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
        return Flux.defer(() -> {
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
            return this.populateHeadersAsync(request, RequestVerb.GET)
                .flatMap(requestPopulated -> {
                    requestPopulated.setEndpointOverride(endpoint);
                    return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                        String message = String.format("Failed to retrieve database account information. %s",
                            e.getCause() != null ? e.getCause().toString() : e.toString());
                        logger.warn(message);
                    }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                        .doOnNext(databaseAccount ->
                            this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled()
                                && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
                });
        });
    }

    /**
     * Certain requests must be routed through gateway even when the client connectivity mode is direct.
* * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.useGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", 
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) { this.storeModel.enableThroughputControl(throughputControlStore); } else { this.gatewayProxy.enableThroughputControl(throughputControlStore); } } this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono); } @Override public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) { return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig); } @Override public ConsistencyLevel getDefaultConsistencyLevelOfAccount() { return this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } /*** * Configure fault injector provider. 
* * @param injectorProvider the fault injector provider. */ @Override public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) { checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null"); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) { this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs); this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs); } this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs); } @Override public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities); } @Override public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities); } @Override public String getMasterKeyOrResourceToken() { return this.masterKeyOrResourceToken; } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, 
null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink, forceRefresh), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal( RxDocumentServiceRequest request, String collectionLink, boolean forceRefresh) { logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, forceRefresh, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); 
return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } /** * Creates a type 4 (pseudo randomly generated) UUID. * <p> * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator. * * @return A randomly generated {@link UUID}. */ public static UUID randomUuid() { return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); } static UUID randomUuid(long msb, long lsb) { msb &= 0xffffffffffff0fffL; msb |= 0x0000000000004000L; lsb &= 0x3fffffffffffffffL; lsb |= 0x8000000000000000L; return new UUID(msb, lsb); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled) { return wrapPointOperationWithAvailabilityStrategy( resourceType, operationType, callback, initialRequestOptions, idempotentWriteRetriesEnabled, this ); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled, DiagnosticsClientContext innerDiagnosticsFactory) { checkNotNull(resourceType, "Argument 'resourceType' must not be null."); checkNotNull(operationType, "Argument 'operationType' must not be null."); checkNotNull(callback, "Argument 'callback' must not be null."); final RequestOptions nonNullRequestOptions = initialRequestOptions != null ? 
initialRequestOptions : new RequestOptions();

        checkArgument(
            resourceType == ResourceType.Document,
            "This method can only be used for document point operations.");

        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);

        List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            resourceType,
            operationType,
            idempotentWriteRetriesEnabled,
            nonNullRequestOptions);

        // With fewer than two applicable regions there is nothing to hedge - run directly.
        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
        }

        ThresholdBasedAvailabilityStrategy availabilityStrategy =
            (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
        List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();

        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);

        orderedApplicableRegionsForSpeculation
            .forEach(region -> {
                RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
                if (monoList.isEmpty()) {
                    // First mono is the primary attempt across all regions; any CosmosException
                    // is captured as a candidate result so other attempts may still win.
                    Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                        callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                            .map(NonTransientPointOperationResult::new)
                            .onErrorResume(
                                RxDocumentClientImpl::isCosmosException,
                                t -> Mono.just(
                                    new NonTransientPointOperationResult(
                                        Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    if (logger.isDebugEnabled()) {
                        monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                            "STARTING to process {} operation in region '{}'", operationType, region)));
                    } else {
                        monoList.add(initialMonoAcrossAllRegions);
                    }
                } else {
                    // Hedged attempts pin the request to a single region by excluding all others.
                    clonedOptions.setExcludeRegions(
                        getEffectiveExcludedRegionsForHedging(
                            nonNullRequestOptions.getExcludeRegions(),
                            orderedApplicableRegionsForSpeculation,
                            region)
                    );

                    // Only non-transient failures resolve a hedged attempt; transient failures
                    // let the other regional attempts keep racing.
                    Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                        callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                            .map(NonTransientPointOperationResult::new)
                            .onErrorResume(
                                RxDocumentClientImpl::isNonTransientCosmosException,
                                t -> Mono.just(
                                    new NonTransientPointOperationResult(
                                        Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    // Stagger hedged attempts: threshold + (attemptIndex - 1) * thresholdStep.
                    Duration delayForCrossRegionalRetry = (availabilityStrategy)
                        .getThreshold()
                        .plus((availabilityStrategy)
                            .getThresholdStep()
                            .multipliedBy(monoList.size() - 1));

                    if (logger.isDebugEnabled()) {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                                .delaySubscription(delayForCrossRegionalRetry));
                    } else {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .delaySubscription(delayForCrossRegionalRetry));
                    }
                }
            });

        // First emitted value wins; diagnostics of all attempts are merged before returning.
        return Mono
            .firstWithValue(monoList)
            .flatMap(nonTransientResult -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                if (nonTransientResult.isError()) {
                    return Mono.error(nonTransientResult.exception);
                }

                return Mono.just(nonTransientResult.response);
            })
            .onErrorMap(throwable -> {
                Throwable exception = Exceptions.unwrap(throwable);

                // Mono.firstWithValue signals NoSuchElementException when no source emitted a
                // value; dig out the first CosmosException among the per-region failures.
                if (exception instanceof NoSuchElementException) {
                    List<Throwable> innerThrowables = Exceptions
                        .unwrapMultiple(exception.getCause());

                    int index = 0;
                    for (Throwable innerThrowable : innerThrowables) {
                        Throwable innerException = Exceptions.unwrap(innerThrowable);

                        if (innerException instanceof CosmosException) {
                            CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                            diagnosticsFactory.merge(nonNullRequestOptions);
                            return cosmosException;
                        } else if (innerException instanceof NoSuchElementException) {
                            logger.trace(
                                "Operation in {} completed with empty result because it was cancelled.",
                                orderedApplicableRegionsForSpeculation.get(index));
                        } else if (logger.isWarnEnabled()) {
                            String message = "Unexpected Non-CosmosException when processing operation in '"
                                + orderedApplicableRegionsForSpeculation.get(index)
                                + "'.";
                            logger.warn(
                                message,
                                innerException
                            );
                        }
index++; } } diagnosticsFactory.merge(nonNullRequestOptions); return exception; }) .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions)); } private static boolean isCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); return unwrappedException instanceof CosmosException; } private static boolean isNonTransientCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); if (!(unwrappedException instanceof CosmosException)) { return false; } CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class); return isNonTransientResultForHedging( cosmosException.getStatusCode(), cosmosException.getSubStatusCode()); } private List<String> getEffectiveExcludedRegionsForHedging( List<String> initialExcludedRegions, List<String> applicableRegions, String currentRegion) { List<String> effectiveExcludedRegions = new ArrayList<>(); if (initialExcludedRegions != null) { effectiveExcludedRegions.addAll(initialExcludedRegions); } for (String applicableRegion: applicableRegions) { if (!applicableRegion.equals(currentRegion)) { effectiveExcludedRegions.add(applicableRegion); } } return effectiveExcludedRegions; } private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) { if (statusCode < HttpConstants.StatusCodes.BADREQUEST) { return true; } if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) { return true; } if (statusCode == HttpConstants.StatusCodes.BADREQUEST || statusCode == HttpConstants.StatusCodes.CONFLICT || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) { return true; } if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == 
HttpConstants.SubStatusCodes.UNKNOWN) { return true; } return false; } private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) { if (clientContextOverride != null) { return clientContextOverride; } return this; } /** * Returns the applicable endpoints ordered by preference list if any * @param operationType - the operationT * @return the applicable endpoints ordered by preference list if any */ private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) { if (operationType.isReadOnlyOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions)); } else if (operationType.isWriteOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions)); } return EMPTY_ENDPOINT_LIST; } private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) { if (orderedEffectiveEndpointsList == null) { return EMPTY_ENDPOINT_LIST; } int i = 0; while (i < orderedEffectiveEndpointsList.size()) { if (orderedEffectiveEndpointsList.get(i) == null) { orderedEffectiveEndpointsList.remove(i); } else { i++; } } return orderedEffectiveEndpointsList; } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, RequestOptions options) { return getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, isIdempotentWriteRetriesEnabled, options.getExcludeRegions()); } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, List<String> excludedRegions) { if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) { return EMPTY_REGION_LIST; } if 
(resourceType != ResourceType.Document) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) { return EMPTY_REGION_LIST; } if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) { return EMPTY_REGION_LIST; } List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions); HashSet<String> normalizedExcludedRegions = new HashSet<>(); if (excludedRegions != null) { excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT))); } List<String> orderedRegionsForSpeculation = new ArrayList<>(); endpoints.forEach(uri -> { String regionName = this.globalEndpointManager.getRegionName(uri, operationType); if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) { orderedRegionsForSpeculation.add(regionName); } }); return orderedRegionsForSpeculation; } private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( final ResourceType resourceType, final OperationType operationType, final Supplier<DocumentClientRetryPolicy> retryPolicyFactory, final RxDocumentServiceRequest req, final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation ) { checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null."); checkNotNull(req, "Argument 'req' must not be null."); assert(resourceType == ResourceType.Document); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = this.getEffectiveEndToEndOperationLatencyPolicyConfig( req.requestContext.getEndToEndOperationLatencyPolicyConfig()); List<String> initialExcludedRegions = req.requestContext.getExcludeRegions(); List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, false, 
initialExcludedRegions ); if (orderedApplicableRegionsForSpeculation.size() < 2) { return feedOperation.apply(retryPolicyFactory, req); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>(); orderedApplicableRegionsForSpeculation .forEach(region -> { RxDocumentServiceRequest clonedRequest = req.clone(); if (monoList.isEmpty()) { Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedRequest.requestContext.setExcludeRegions( getEffectiveExcludedRegionsForHedging( initialExcludedRegions, orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) .delaySubscription(delayForCrossRegionalRetry)); } else { 
monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { return Utils.as(innerException, CosmosException.class); } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( message, innerException ); } index++; } } return exception; }); } @FunctionalInterface private interface DocumentPointOperation { Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride); } private static class NonTransientPointOperationResult { private final ResourceResponse<Document> response; private final CosmosException exception; public NonTransientPointOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientPointOperationResult(ResourceResponse<Document> response) { checkNotNull(response, "Argument 'response' must not be null."); this.exception = null; this.response = 
response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public ResourceResponse<Document> getResponse() { return this.response; } } private static class NonTransientFeedOperationResult<T> { private final T response; private final CosmosException exception; public NonTransientFeedOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientFeedOperationResult(T response) { checkNotNull(response, "Argument 'response' must not be null."); this.exception = null; this.response = response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public T getResponse() { return this.response; } } private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext { private final AtomicBoolean isMerged = new AtomicBoolean(false); private final DiagnosticsClientContext inner; private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics; private final boolean shouldCaptureAllFeedDiagnostics; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) { checkNotNull(inner, "Argument 'inner' must not be null."); this.inner = inner; this.createdDiagnostics = new ConcurrentLinkedQueue<>(); this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics; } @Override public DiagnosticsClientConfig getConfig() { return inner.getConfig(); } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = inner.createDiagnostics(); createdDiagnostics.add(diagnostics); mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } @Override public String getUserAgent() { return inner.getUserAgent(); 
} @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return this.mostRecentlyCreatedDiagnostics.get(); } public void merge(RequestOptions requestOptions) { CosmosDiagnosticsContext knownCtx = null; if (requestOptions != null) { CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot(); if (ctxSnapshot != null) { knownCtx = requestOptions.getDiagnosticsContextSnapshot(); } } merge(knownCtx); } public void merge(CosmosDiagnosticsContext knownCtx) { if (!isMerged.compareAndSet(false, true)) { return; } CosmosDiagnosticsContext ctx = null; if (knownCtx != null) { ctx = knownCtx; } else { for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() != null) { ctx = diagnostics.getDiagnosticsContext(); break; } } } if (ctx == null) { return; } for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) { if (this.shouldCaptureAllFeedDiagnostics && diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) { AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics); if (isCaptured != null) { isCaptured.set(true); } } ctxAccessor.addDiagnostics(ctx, diagnostics); } } } public void reset() { this.createdDiagnostics.clear(); this.isMerged.set(false); } } }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private final static List<String> EMPTY_REGION_LIST = Collections.emptyList(); private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor(); private final static ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final static ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor = ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor(); private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>(); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer 
itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxGatewayStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private final ApiType apiType; private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. 
Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final String clientCorrelationId; private final SessionRetryOptions sessionRetryOptions; private final boolean sessionCapturingOverrideEnabled; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, 
cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String 
clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length == 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? 
BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { assert(clientTelemetryConfig != null); Boolean clientTelemetryEnabled = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor() .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); assert(clientTelemetryEnabled != null); activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.incrementAndGet(); this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ? 
String.format("%05d",this.clientId): clientCorrelationId; clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withClientMap(clientMap); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig; this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig); this.sessionRetryOptions = sessionRetryOptions; logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = 
AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); this.diagnosticsClientConfig.withMachineId(tempMachineId); this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig); this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions); this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new 
SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = new ConcurrentHashMap<>(); this.apiType = apiType; this.clientTelemetryConfig = clientTelemetryConfig; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig)); this.mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } private void updateGatewayProxy() { (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); (this.gatewayProxy).setCollectionCache(this.collectionCache); (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = 
createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); clientTelemetry = new ClientTelemetry( this, null, randomUuid().toString(), ManagementFactory.getRuntimeMXBean().getName(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.configs, this.clientTelemetryConfig, this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } this.retryPolicy.setRxCollectionCache(this.collectionCache); ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null ? 
consistencyLevel : this.getDefaultConsistencyLevelOfAccount(); boolean updatedDisableSessionCapturing = (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry, this.globalEndpointManager); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, 
sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString()); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations, this.sessionRetryOptions); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public String getClientCorrelationId() { return this.clientCorrelationId; } @Override public String getMachineId() { if (this.diagnosticsClientConfig == null) { return null; } return this.diagnosticsClientConfig.getMachineId(); } @Override public String getUserAgent() { return this.userAgentContainer.getUserAgent(); } @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return 
mostRecentlyCreatedDiagnostics.get(); } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> toResourceResponse(response, 
Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T> 
Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum) { return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this); } private <T> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum, DiagnosticsClientContext innerDiagnosticsFactory) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions(); UUID correlationActivityIdOfRequestOptions = qryOptAccessor .getCorrelationActivityId(nonNullQueryOptions); UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ? correlationActivityIdOfRequestOptions : randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions)); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); state.registerDiagnosticsFactory( diagnosticsFactory::reset, diagnosticsFactory::merge); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal( diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout), invalidPartitionExceptionRetryPolicy ).flatMap(result -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); return Mono.just(result); }) .onErrorMap(throwable -> { 
diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot())); } private <T> Flux<FeedResponse<T>> createQueryInternal( DiagnosticsClientContext diagnosticsClientContext, String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId, final AtomicBoolean isQueryCancelledOnTimeout) { Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) { queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); RequestOptions requestOptions = options == null? 
null : ImplementationBridgeHelpers
                .CosmosQueryRequestOptionsHelper
                .getCosmosQueryRequestOptionsAccessor()
                .toRequestOptions(options);
            CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
                getEndToEndOperationLatencyPolicyConfig(requestOptions);
            // Only wrap the flux with a client-side timeout when an end-to-end latency policy
            // is configured and enabled; otherwise return the raw pipeline untouched.
            if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
                return getFeedResponseFluxWithTimeout(
                    feedResponseFlux,
                    endToEndPolicyConfig,
                    options,
                    isQueryCancelledOnTimeout,
                    diagnosticsClientContext);
            }
            return feedResponseFlux;
        }, Queues.SMALL_BUFFER_SIZE, 1);
    }

    /**
     * Attaches the best-available diagnostics to a query exception (typically an
     * {@link OperationCancelledException} raised on an end-to-end timeout).
     * <p>
     * Preference order: the most recently created diagnostics on the client context; if none,
     * all diagnostics tracked for the cancelled request are merged into one aggregate and
     * attached instead.
     *
     * @param requestOptions          the query request options holding the cancelled-request
     *                                diagnostics tracker
     * @param exception               the exception to decorate with diagnostics
     * @param diagnosticsClientContext source of the most recently created diagnostics
     */
    private static void applyExceptionToMergedDiagnosticsForQuery(
        CosmosQueryRequestOptions requestOptions,
        CosmosException exception,
        DiagnosticsClientContext diagnosticsClientContext) {

        CosmosDiagnostics mostRecentlyCreatedDiagnostics =
            diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

        if (mostRecentlyCreatedDiagnostics != null) {
            // A fresh diagnostics instance exists - use it directly.
            BridgeInternal.setCosmosDiagnostics(
                exception,
                mostRecentlyCreatedDiagnostics);
        } else {
            List<CosmosDiagnostics> cancelledRequestDiagnostics =
                qryOptAccessor
                    .getCancelledRequestDiagnosticsTracker(requestOptions);
            // if there is any cancelled requests, collect cosmos diagnostics
            if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
                // combine all the cosmos diagnostics into one by folding every tracked
                // diagnostics' client-side request statistics into the first one
                CosmosDiagnostics aggregratedCosmosDiagnostics =
                    cancelledRequestDiagnostics
                        .stream()
                        .reduce((first, toBeMerged) -> {
                            ClientSideRequestStatistics clientSideRequestStatistics =
                                ImplementationBridgeHelpers
                                    .CosmosDiagnosticsHelper
                                    .getCosmosDiagnosticsAccessor()
                                    .getClientSideRequestStatisticsRaw(first);

                            // BUGFIX: this previously read the statistics from 'first' again,
                            // which merged 'first' into itself and dropped every 'toBeMerged'
                            // diagnostics instance from the aggregate.
                            ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                                ImplementationBridgeHelpers
                                    .CosmosDiagnosticsHelper
                                    .getCosmosDiagnosticsAccessor()
                                    .getClientSideRequestStatisticsRaw(toBeMerged);

                            if (clientSideRequestStatistics == null) {
                                return toBeMerged;
                            } else {
                                clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                                return first;
                            }
                        })
                        .get();

                BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics);
            }
        }
    }

    private static <T>
Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout( Flux<FeedResponse<T>> feedResponseFlux, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, CosmosQueryRequestOptions requestOptions, final AtomicBoolean isQueryCancelledOnTimeout, DiagnosticsClientContext diagnosticsClientContext) { Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout(); if (endToEndTimeout.isNegative()) { return feedResponseFlux .timeout(endToEndTimeout) .onErrorMap(throwable -> { if (throwable instanceof TimeoutException) { CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout); cancellationException.setStackTrace(throwable.getStackTrace()); isQueryCancelledOnTimeout.set(true); applyExceptionToMergedDiagnosticsForQuery( requestOptions, cancellationException, diagnosticsClientContext); return cancellationException; } return throwable; }); } return feedResponseFlux .timeout(endToEndTimeout) .onErrorMap(throwable -> { if (throwable instanceof TimeoutException) { CosmosException exception = new OperationCancelledException(); exception.setStackTrace(throwable.getStackTrace()); isQueryCancelledOnTimeout.set(true); applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext); return exception; } return throwable; }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) { return queryDatabases(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return 
ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response 
-> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } 
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, QueryFeedOperationState state) { return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, 
QueryFeedOperationState state) {
        return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection);
    }

    /**
     * Serializes stored-procedure parameters into a JSON array literal, e.g. {@code [p0,p1]}.
     * {@link JsonSerializable} values use the model-bridge serializer; everything else goes
     * through the shared Jackson {@code mapper}.
     *
     * @param objectArray the parameter values to serialize
     * @return a JSON array string containing the serialized parameters
     * @throws IllegalArgumentException if a value cannot be serialized to JSON
     */
    private static String serializeProcedureParams(List<Object> objectArray) {
        String[] stringArray = new String[objectArray.size()];

        for (int i = 0; i < objectArray.size(); ++i) {
            Object object = objectArray.get(i);
            if (object instanceof JsonSerializable) {
                stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object);
            } else {
                // POJO, ObjectNode, number, STRING or Boolean
                try {
                    stringArray[i] = mapper.writeValueAsString(object);
                } catch (IOException e) {
                    throw new IllegalArgumentException("Can't serialize the object into the json string", e);
                }
            }
        }

        return String.format("[%s]", StringUtils.join(stringArray, ","));
    }

    /**
     * Validates a resource id against the service naming rules: ids must not contain
     * {@code '/'}, {@code '\'}, {@code '?'} or {@code '#'} and must not end with a space.
     *
     * @param resource the resource whose id is validated
     * @throws IllegalArgumentException if the id contains an illegal character or ends with a space
     */
    private static void validateResource(Resource resource) {
        if (!StringUtils.isEmpty(resource.getId())) {
            // BUGFIX: the '#' check was truncated/garbled here, leaving an unterminated
            // condition; restored per the service's illegal-character set (/, \, ?, #).
            if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 ||
                resource.getId().indexOf('?') != -1 || resource.getId().indexOf('#') != -1) {
                throw new IllegalArgumentException("Id contains illegal chars.");
            }

            if (resource.getId().endsWith(" ")) {
                throw new IllegalArgumentException("Id ends with a space.");
            }
        }
    }

    /**
     * Builds the HTTP headers for a request: tentative-write allowance for multi-write
     * accounts, client/default consistency level, content-response preference, and any
     * per-request options (etags, triggers, session token, throughput, etc.).
     *
     * @param options       per-request options; may be null
     * @param resourceType  the target resource type
     * @param operationType the operation being performed
     * @return the populated header map
     */
    private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) {
        Map<String, String> headers = new HashMap<>();

        if (this.useMultipleWriteLocations) {
            headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString());
        }

        if (consistencyLevel != null) {
            headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString());
        }

        if (options == null) {
            // Even without explicit options, suppress the response payload on document
            // writes when the client was configured with contentResponseOnWrite disabled.
            if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
                headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
            }
            return headers;
        }

        Map<String, String> customOptions = options.getHeaders();
        if (customOptions != null) {
            headers.putAll(customOptions);
        }

        boolean
contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if (options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if 
(options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null) { if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } if 
    /** Returns the retry-policy factory whose policies reset the session token on failure. */
    public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
        return this.resetSessionTokenRetryPolicy;
    }

    // Resolves the target collection asynchronously, then delegates to the synchronous
    // overload to stamp the partition-key header on the request. Emits the same request
    // instance once the header has been set.
    private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                      ByteBuffer contentAsByteBuffer,
                                                                      Document document,
                                                                      RequestOptions options) {

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return collectionObs
            .map(collectionValueHolder -> {
                addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
                return request;
            });
    }

    // Same as above, but the caller supplies the (possibly already in-flight) collection
    // resolution Mono instead of this method starting one.
    private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                      ByteBuffer contentAsByteBuffer,
                                                                      Object document,
                                                                      RequestOptions options,
                                                                      Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {

        return collectionObs.map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
    }

    /**
     * Computes the effective partition key for the request and records it both on the request
     * object and as the partition-key HTTP header.
     *
     * Resolution precedence (first match wins):
     *   1. options.getPartitionKey() equals PartitionKey.NONE -> the collection's "none" key;
     *   2. options.getPartitionKey() is set                   -> that explicit key;
     *   3. the collection has no partition-key paths          -> the empty key;
     *   4. a document body (object or serialized buffer) is
     *      available                                          -> extract the key from the document,
     *      recording the extraction cost as serialization diagnostics;
     *   5. otherwise                                          -> UnsupportedOperationException,
     *      since a partition key is mandatory for this operation.
     */
    private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                            ByteBuffer contentAsByteBuffer,
                                            Object objectDoc,
                                            RequestOptions options,
                                            DocumentCollection collection) {
        PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();

        PartitionKeyInternal partitionKeyInternal = null;
        if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else if (options != null && options.getPartitionKey() != null) {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
        } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
            // Collection is not partitioned; an empty key routes correctly.
            partitionKeyInternal = PartitionKeyInternal.getEmpty();
        } else if (contentAsByteBuffer != null || objectDoc != null) {
            InternalObjectNode internalObjectNode;
            if (objectDoc instanceof InternalObjectNode) {
                internalObjectNode = (InternalObjectNode) objectDoc;
            } else if (objectDoc instanceof ObjectNode) {
                internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
            } else if (contentAsByteBuffer != null) {
                // Rewind: the buffer may already have been read when it was serialized.
                contentAsByteBuffer.rewind();
                internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
            } else {
                //  This is a safety check, this should not happen ever.
                //  If it does, it is a bug.
                throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
            }

            // Time the partition-key extraction so it shows up in serialization diagnostics.
            Instant serializationStartTime = Instant.now();
            partitionKeyInternal =  PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
            Instant serializationEndTime = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTime,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
            );
            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }

        } else {
            throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
        }

        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    }
RequestOptions options, boolean disableAutomaticIdGeneration, OperationType operationType, DiagnosticsClientContext clientContextOverride) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); String trackingId = null; if (options != null) { trackingId = options.getTrackingId(); } ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), operationType, ResourceType.Document, path, requestHeaders, options, content); if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if( options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content); if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext 
serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } if (options != null) { request.requestContext.setExcludeRegions(options.getExcludeRegions()); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, 
    /**
     * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
     *
     * Populates the standard request headers: x-ms-date, Authorization (for key/token-based
     * auth), API type, SDK capabilities, Content-Type defaults per verb, Accept, and — when
     * the request targets a feed range — the feed-range filtering headers, before finally
     * applying AAD authorization (if configured) via populateAuthorizationHeader.
     *
     * @param request request to populate headers to
     * @param httpMethod http method
     * @return Mono, which on subscription will populate the headers in the request passed in the argument.
     */
    private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
        // The date header must be set before computing the key-based authorization signature.
        request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
        if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
            || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
            String resourceName = request.getResourceAddress();

            // Token type passed here is always PrimaryMasterKey; getUserAuthorizationToken
            // selects the actual token source internally.
            String authorization = this.getUserAuthorizationToken(
                resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
                AuthorizationTokenType.PrimaryMasterKey, request.properties);
            try {
                // The signature may contain characters that are not header-safe.
                authorization = URLEncoder.encode(authorization, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new IllegalStateException("Failed to encode authtoken.", e);
            }
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
        }

        if (this.apiType != null) {
            request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
        }

        this.populateCapabilitiesHeader(request);

        // Default the Content-Type only when the caller has not set one explicitly.
        if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
        }

        if (RequestVerb.PATCH.equals(httpMethod)
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
        }

        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        }

        MetadataDiagnosticsContext metadataDiagnosticsCtx =
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

        if (this.requiresFeedRangeFiltering(request)) {
            // Feed-range headers need the resolved collection; chain AAD auth afterwards.
            return request.getFeedRange()
                          .populateFeedRangeFilteringHeaders(
                              this.getPartitionKeyRangeCache(),
                              request,
                              this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
                          .flatMap(this::populateAuthorizationHeader);
        }

        return this.populateAuthorizationHeader(request);
    }

    // Advertises the SDK's supported capabilities unless the header is already present.
    private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
            request
                .getHeaders()
                .put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES,
                    HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
        }
    }

    // Feed-range filtering applies only to Document/Conflict feed-style reads (ReadFeed,
    // Query, SqlQuery) that actually carry a feed range.
    private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
        if (request.getResourceType() != ResourceType.Document &&
                request.getResourceType() != ResourceType.Conflict) {
            return false;
        }

        switch (request.getOperationType()) {
            case ReadFeed:
            case Query:
            case SqlQuery:
                return request.getFeedRange() != null;
            default:
                return false;
        }
    }
    /** Applies AAD authorization to raw HTTP headers when AAD auth is configured; no-op otherwise. */
    @Override
    public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
        if (httpHeaders == null) {
            throw new IllegalArgumentException("httpHeaders");
        }
        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return httpHeaders;
                });
        }
        return Mono.just(httpHeaders);
    }

    /** Returns the authorization token type this client was configured with. */
    @Override
    public AuthorizationTokenType getAuthorizationTokenType() {
        return this.authorizationTokenType;
    }

    /**
     * Resolves the authorization token for a request. Source precedence:
     *   1. a user-supplied token resolver,
     *   2. a key credential (HMAC signature over verb/resource/headers),
     *   3. a single master key / auth-key resource token,
     *   4. the per-resource token map (with the first permission-feed token used for
     *      DatabaseAccount reads).
     *
     * NOTE(review): the {@code tokenType} parameter is not referenced anywhere in this body —
     * callers always pass PrimaryMasterKey; confirm whether it is kept only for interface
     * compatibility.
     */
    @Override
    public String getUserAuthorizationToken(String resourceName,
                                            ResourceType resourceType,
                                            RequestVerb requestVerb,
                                            Map<String, String> headers,
                                            AuthorizationTokenType tokenType,
                                            Map<String, Object> properties) {

        if (this.cosmosAuthorizationTokenResolver != null) {
            return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(),
                resourceName, this.resolveCosmosResourceType(resourceType).toString(),
                properties != null ? Collections.unmodifiableMap(properties) : null);
        } else if (credential != null) {
            return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
                resourceType, headers);
        } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
            return masterKeyOrResourceToken;
        } else {
            assert resourceTokensMap != null;
            if(resourceType.equals(ResourceType.DatabaseAccount)) {
                return this.firstResourceTokenFromPermissionFeed;
            }

            return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
        }
    }

    // Maps the internal ResourceType to the public CosmosResourceType; unknown values fall
    // back to SYSTEM.
    private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
        CosmosResourceType cosmosResourceType =
            ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
        if (cosmosResourceType == null) {
            return CosmosResourceType.SYSTEM;
        }
        return cosmosResourceType;
    }

    // Records the service-returned session token so later requests can read their own writes.
    void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
        this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
    }

    // Sends a create (HTTP POST) request through the store proxy after populating headers;
    // closes the retry-context timing window on retried attempts.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                                   DocumentClientRetryPolicy documentClientRetryPolicy,
                                                   OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);

                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }
.flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.PATCH) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } @Override public Mono<ResourceResponse<Document>> createDocument( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Create, (opt, e2ecfg, clientCtxOverride) -> createDocumentCore( collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride), options, options != null && 
options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> createDocumentCore( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions(); if (nonNullRequestOptions.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal( collectionLink, document, nonNullRequestOptions, disableAutomaticIdGeneration, finalRetryPolicyInstance, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> createDocumentInternal( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy, DiagnosticsClientContext clientContextOverride) { try { logger.debug("Creating a Document. 
    /**
     * Wraps a point-operation Mono with the end-to-end operation timeout, when one is
     * configured and enabled. A negative configured timeout fails immediately with an
     * OperationCancelledException carrying the most recent diagnostics; otherwise reactor's
     * timeout operator is applied and TimeoutException is translated by
     * getCancellationExceptionForPointOperations.
     */
    private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
        RequestOptions requestOptions,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        Mono<T> rxDocumentServiceResponseMono,
        ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

        requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);

        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
            if (endToEndTimeout.isNegative()) {
                // Ensure there is at least one diagnostics snapshot to attach to the error.
                CosmosDiagnostics latestCosmosDiagnosticsSnapshot =
                    scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
                if (latestCosmosDiagnosticsSnapshot == null) {
                    scopedDiagnosticsFactory.createDiagnostics();
                }
                return Mono.error(getNegativeTimeoutException(
                    scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
            }

            return rxDocumentServiceResponseMono
                .timeout(endToEndTimeout)
                .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                    scopedDiagnosticsFactory,
                    throwable,
                    requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
        }
        return rxDocumentServiceResponseMono;
    }

    /**
     * Maps a reactor TimeoutException into an OperationCancelledException: preserves the
     * original stack trace, runs the request-context "mark cancelled on timeout" callback if
     * one was registered, and attaches the latest diagnostics snapshot. Any other throwable
     * passes through unchanged.
     */
    private static Throwable getCancellationExceptionForPointOperations(
        ScopedDiagnosticsFactory scopedDiagnosticsFactory,
        Throwable throwable,
        AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {
        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        if (unwrappedException instanceof TimeoutException) {
            CosmosException exception = new OperationCancelledException();
            exception.setStackTrace(throwable.getStackTrace());

            Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
            if (actualCallback != null) {
                logger.trace("Calling actual Mark E2E timeout callback");
                actualCallback.run();
            }

            CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (lastDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());

            return exception;
        }
        return throwable;
    }

    // Builds the error returned when a caller configured a negative end-to-end timeout.
    private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
        checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
        checkArgument(
            negativeTimeout.isNegative(),
            "This exception should only be used for negative timeouts");
        String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
        CosmosException exception = new OperationCancelledException(message, null);
        BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
        if (cosmosDiagnostics != null) {
            BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
        }

        return exception;
    }

    /** Public upsert entry point; routes through the cross-region availability strategy. */
    @Override
    public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                           RequestOptions options, boolean disableAutomaticIdGeneration) {

        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Upsert,
            (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
                collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }
options, boolean disableAutomaticIdGeneration, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); if (nonNullRequestOptions.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> upsertDocumentInternal( collectionLink, document, nonNullRequestOptions, disableAutomaticIdGeneration, finalRetryPolicyInstance, scopedDiagnosticsFactory), finalRetryPolicyInstance), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> upsertDocumentInternal( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { try { logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest( retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride); return reqObs .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Replace, (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore( documentLink, document, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> replaceDocumentCore( String documentLink, Object document, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); if (nonNullRequestOptions.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( documentLink, document, nonNullRequestOptions, finalRequestRetryPolicy, endToEndPolicyConfig, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal( documentLink, typedDocument, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Replace, (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore( document, opt, e2ecfg, 
    /**
     * Core replace path for a self-linked Document. Wraps with
     * PartitionKeyMismatchRetryPolicy when no explicit partition key was supplied.
     *
     * NOTE(review): unlike the link-based overload, this passes the possibly-null
     * {@code options} (not a non-null fallback) into PartitionKeyMismatchRetryPolicy —
     * confirm that policy tolerates null options.
     */
    private Mono<ResourceResponse<Document>> replaceDocumentCore(
        Document document,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
        if (options == null || options.getPartitionKey() == null) {
            String collectionLink = document.getSelfLink();
            requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
                collectionCache, requestRetryPolicy, collectionLink, options);
        }
        DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;

        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                document, options, finalRequestRetryPolicy, endToEndPolicyConfig, clientContextOverride),
            requestRetryPolicy);
    }

    // Delegates to the link-based replace using the document's own self-link; synchronous
    // failures become an error Mono.
    private Mono<ResourceResponse<Document>> replaceDocumentInternal(
        Document document,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        try {
            if (document == null) {
                throw new IllegalArgumentException("document");
            }

            return this.replaceDocumentInternal(
                document.getSelfLink(), document, options, retryPolicyInstance, clientContextOverride);
        } catch (Exception e) {
            logger.debug("Failure in replacing a database due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    /**
     * Builds and sends the replace request: stamps the tracking id on the document if one was
     * requested, serializes the body (recording serialization diagnostics), creates the
     * service request, wires non-idempotent-write retries, the E2E-timeout cancellation hook
     * and excluded regions, resolves the collection to add partition-key information, and
     * finally dispatches via replace().
     */
    private Mono<ResourceResponse<Document>> replaceDocumentInternal(String documentLink,
                                                                     Document document,
                                                                     RequestOptions options,
                                                                     DocumentClientRetryPolicy retryPolicyInstance,
                                                                     DiagnosticsClientContext clientContextOverride) {

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
        final String path = Utils.joinPath(documentLink, null);
        final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace);
        Instant serializationStartTimeUTC = Instant.now();

        if (options != null) {
            // Persist the caller-provided tracking id into the document body itself.
            String trackingId = options.getTrackingId();

            if (trackingId != null && !trackingId.isEmpty()) {
                document.set(Constants.Properties.TRACKING_ID, trackingId);
            }
        }

        ByteBuffer content = serializeJsonToByteBuffer(document);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        // addPartitionKeyInformation emits the same `request` instance once the PK header is
        // set, so using `request` (rather than the lambda's `req`) below is equivalent.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);

        return requestObs
            .flatMap(req -> replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
    }

    // Effective E2E latency config for a request: per-request override or the client default.
    private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
        return this.getEffectiveEndToEndOperationLatencyPolicyConfig(
            options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null);
    }

    private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
        CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
        return policyConfig != null ? policyConfig : this.cosmosEndToEndOperationLatencyPolicyConfig;
    }

    /** Public patch entry point; routes through the cross-region availability strategy. */
    @Override
    public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                          CosmosPatchOperations cosmosPatchOperations,
                                                          RequestOptions options) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Patch,
            (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
                documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }
options : new RequestOptions();
    // Scoped factory collects diagnostics for this operation so they can be merged even across
    // the availability-strategy hedged attempts.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink, cosmosPatchOperations, nonNullRequestOptions,
                documentClientRetryPolicy, scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}

// Builds and issues the Patch request: serializes the patch operations (recording serialization
// diagnostics), applies non-idempotent-retry / E2E-timeout / excluded-regions settings from
// options, resolves the collection, adds partition-key info, then calls patch().
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);

    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    Instant serializationStartTimeUTC = Instant.now();

    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

    Instant serializationEndTime = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }

    if (options != null) {
        // lets the E2E-latency watchdog mark this request cancelled when the client deadline fires
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    // Note: content/document args are null here — patch does not re-serialize the document body.
    // addPartitionKeyInformation mutates `request`; the flatMap intentionally uses `request`, not `req`.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request, null, null, options, collectionObs);

    return requestObs
        .flatMap(req -> patch(request, retryPolicyInstance))
        .map(resp -> toResourceResponse(resp, Document.class));
}

// Delete by link only; partition key (if any) travels in RequestOptions.
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink, null, opt, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

// Delete overload that also carries the item body (used to derive the partition key downstream).
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink,
                                                       InternalObjectNode internalObjectNode,
                                                       RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink, internalObjectNode, nonNullRequestOptions,
                requestRetryPolicy, scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}

// Builds and issues the Delete request for a single item. No payload is sent; the
// internalObjectNode (when present) is only used to derive partition-key information.
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }

        if (options != null) {
            // E2E-timeout cancellation hook + per-request region exclusions
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);

        return requestObs
            .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

// Deletes every item in one logical partition. NOTE(review): the partitionKey parameter is not
// forwarded explicitly here — presumably it is carried inside `options`; confirm against callers.
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink,
                                                                         PartitionKey partitionKey,
                                                                         RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}

// Issues the partition-level delete (ResourceType.PartitionKey) against the collection link.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink,
                                                                                  RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, null, options, collectionObs);

        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

// Public read entry point; uses this client itself as the diagnostics factory.
@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    return readDocument(documentLink, options, this);
}

// Internal overload that allows a caller-supplied diagnostics factory (used by readMany point reads).
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
        options,
        false, // reads are idempotent — no non-idempotent-write retry handling
        innerDiagnosticsFactory
    );
}

private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy retryPolicyInstance =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> readDocumentInternal(
                documentLink, nonNullRequestOptions, retryPolicyInstance, scopedDiagnosticsFactory),
            retryPolicyInstance),
        scopedDiagnosticsFactory
    );
}

// Builds and issues the point-read request. NOTE(review): `options` is dereferenced without a
// null check here (unlike the write paths); callers always pass the non-null RequestOptions
// created in readDocumentCore — confirm before reusing this method elsewhere.
private Mono<ResourceResponse<Document>> readDocumentInternal(
    String documentLink,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Reading a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Read, ResourceType.Document, path, requestHeaders, options);

        // E2E-timeout cancellation hook + per-request region exclusions
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            this.collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        // addPartitionKeyInformation mutates `request`; the flatMap intentionally uses `request`, not `req`
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, null, options, collectionObs);

        return requestObs.flatMap(req -> this.read(request, retryPolicyInstance)
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in reading a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

// Full scan of a container, implemented as a SELECT * query.
@Override
public <T> Flux<FeedResponse<T>> readDocuments(
    String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
}

// readMany: fetches a batch of items identified by (id, partition key) pairs. Items are bucketed
// by partition-key range; single-item buckets become point reads, multi-item buckets become
// per-range SQL queries (see getRangeQueryMap); results are merged into a single FeedResponse.
@Override
public <T> Mono<FeedResponse<T>> readMany(
    List<CosmosItemIdentity> itemIdentityList,
    String collectionLink,
    QueryFeedOperationState state,
    Class<T> klass) {

    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
    state.registerDiagnosticsFactory(
        () -> {},
        (ctx) -> diagnosticsFactory.merge(ctx)
    );

    String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink, null
    );

    // Resolve the collection first — needed for the partition key definition and resource id.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request);

    return collectionObs
        .flatMap(documentCollectionResourceResponse -> {
            final DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }

            final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();

            Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                .tryLookupAsync(
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    collection.getResourceId(), null, null);
            return valueHolderMono
                .flatMap(collectionRoutingMapValueHolder -> {
                    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                    CollectionRoutingMap routingMap =
collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Bucket each requested identity by the partition-key range that owns its
                    // effective partition key.
                    itemIdentityList
                        .forEach(itemIdentity -> {
                            // For hierarchical (MULTI_HASH) keys every path component must be supplied.
                            if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH)
                                && ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                    .getComponents().size() != pkDefinition.getPaths().size()) {
                                throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                            }
                            String effectivePartitionKeyString = PartitionKeyInternalHelper
                                .getEffectivePartitionKeyString(
                                    BridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey()),
                                    pkDefinition);

                            PartitionKeyRange range =
                                routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                            if (partitionRangeItemKeyMap.get(range) == null) {
                                List<CosmosItemIdentity> list = new ArrayList<>();
                                list.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, list);
                            } else {
                                List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range);
                                pairs.add(itemIdentity);
                                partitionRangeItemKeyMap.put(range, pairs);
                            }
                        });

                    // Multi-item buckets get a per-range SQL query; single-item buckets are
                    // excluded from this map and served by point reads instead.
                    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap =
                        getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());

                    Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany(
                        diagnosticsFactory,
                        partitionRangeItemKeyMap,
                        resourceLink,
                        state.getQueryOptions(),
                        klass);

                    Flux<FeedResponse<Document>> queries = queryForReadMany(
                        diagnosticsFactory,
                        resourceLink,
                        new SqlQuerySpec(DUMMY_SQL_QUERY),
                        state.getQueryOptions(),
                        Document.class,
                        ResourceType.Document,
                        collection,
                        Collections.unmodifiableMap(rangeQueryMap));

                    // Merge point reads and queries, then aggregate all pages into one response:
                    // sum request charges, union query metrics/request statistics, record the
                    // operation in the diagnostics context.
                    return Flux.merge(pointReads, queries)
                        .collectList()
                        .map(feedList -> {
                            List<T> finalList = new ArrayList<>();
                            HashMap<String, String> headers = new HashMap<>();
                            ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                            Collection<ClientSideRequestStatistics> aggregateRequestStatistics =
                                new DistinctClientSideRequestStatisticsCollection();
                            double requestCharge = 0;
                            for (FeedResponse<Document> page : feedList) {
                                ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                    ModelBridgeInternal.queryMetrics(page);
                                if (pageQueryMetrics != null) {
                                    pageQueryMetrics.forEach(
                                        aggregatedQueryMetrics::putIfAbsent);
                                }

                                requestCharge += page.getRequestCharge();
                                finalList.addAll(page.getResults().stream().map(document ->
                                    ModelBridgeInternal.toObjectFromJsonSerializable(document, klass))
                                    .collect(Collectors.toList()));
                                aggregateRequestStatistics.addAll(
                                    diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                            }

                            CosmosDiagnostics aggregatedDiagnostics =
                                BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                            diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                aggregatedDiagnostics, aggregateRequestStatistics);

                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                            if (ctx != null) {
                                // readMany reports a single 200 operation regardless of per-item misses
                                ctxAccessor.recordOperation(
                                    ctx,
                                    200,
                                    0,
                                    finalList.size(),
                                    requestCharge,
                                    aggregatedDiagnostics,
                                    null
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        aggregatedDiagnostics,
                                        ctx);
                            }

                            headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                .toString(requestCharge));
                            FeedResponse<T> frp = BridgeInternal
                                .createFeedResponseWithQueryMetrics(
                                    finalList,
                                    headers,
                                    aggregatedQueryMetrics,
                                    null,
                                    false,
                                    false,
                                    aggregatedDiagnostics);
                            return frp;
                        });
                })
                // On failure, fold the exception's diagnostics into the operation state before
                // propagating so the caller still gets a complete diagnostics context.
                .onErrorMap(throwable -> {
                    if (throwable instanceof CosmosException) {
                        CosmosException cosmosException = (CosmosException)throwable;
                        CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                        if (diagnostics != null) {
                            state.mergeDiagnosticsContext();
                            CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();

                            if (ctx != null) {
                                ctxAccessor.recordOperation(
                                    ctx,
                                    cosmosException.getStatusCode(),
                                    cosmosException.getSubStatusCode(),
                                    0,
                                    cosmosException.getRequestCharge(),
                                    diagnostics,
                                    throwable
                                );
                                diagnosticsAccessor
                                    .setDiagnosticsContext(
                                        diagnostics,
                                        state.getDiagnosticsContextSnapshot());
                            }
                        }
                        return cosmosException;
                    }

                    return throwable;
                });
        }
    );
}

// Builds one SQL query per partition-key range for readMany. Only buckets with MORE than one
// item get a query — single-item buckets are handled as point reads by pointReadsForReadMany.
// Chooses the cheapest query shape: id-only IN-list when the container is partitioned on /id,
// a per-path conjunction for hierarchical (MULTI_HASH) keys, otherwise (id AND pk) pairs.
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
    Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
    PartitionKeyDefinition partitionKeyDefinition) {

    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
    String partitionKeySelector = createPkSelector(partitionKeyDefinition);

    for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {

        SqlQuerySpec sqlQuerySpec;
        List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue();
        if (cosmosItemIdentityList.size() > 1) {
            if (partitionKeySelector.equals("[\"id\"]")) {
                // partition key IS the id — a plain id IN (...) query suffices
                sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(
                    cosmosItemIdentityList, partitionKeySelector);
            } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
                sqlQuerySpec = createReadManyQuerySpecMultiHash(
                    entry.getValue(), partitionKeyDefinition);
            } else {
                sqlQuerySpec = createReadManyQuerySpec(cosmosItemIdentityList, partitionKeySelector);
            }

            rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
        }
    }

    return rangeQueryMap;
}

// Query shape for containers partitioned on /id: SELECT * FROM c WHERE c.id IN (@param0, ...).
// All values are parameterized — no user input is concatenated into the query text.
private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
    List<CosmosItemIdentity> idPartitionKeyPairList,
    String partitionKeySelector) {

    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();

    queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
    for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
        CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);

        String idValue = itemIdentity.getId();
        String idParamName = "@param" + i;
        parameters.add(new SqlParameter(idParamName, idValue));
        queryStringBuilder.append(idParamName);

        if (i < idPartitionKeyPairList.size() - 1) {
            queryStringBuilder.append(", ");
        }
    }
    queryStringBuilder.append(" )");

    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}

// General query shape: OR-list of (c.id = @paramOdd AND c<pkSelector> = @paramEven) pairs.
// Parameters are interleaved: even indices are partition-key values, odd indices are ids.
private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();

    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);

        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        String pkParamName = "@param" + (2 * i);
        parameters.add(new SqlParameter(pkParamName, pkValue));

        String idValue = itemIdentity.getId();
        String idParamName = "@param" + (2 * i + 1);
        parameters.add(new SqlParameter(idParamName, idValue));

        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        queryStringBuilder.append(" AND ");
        queryStringBuilder.append(" c");
        // partitionKeySelector is the bracketed property path, e.g. ["pk"] -> c["pk"]
        queryStringBuilder.append(partitionKeySelector);
        queryStringBuilder.append((" = "));
        queryStringBuilder.append(pkParamName);
        queryStringBuilder.append(" )");

        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");

    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}

// Hierarchical (MULTI_HASH) query shape: each item contributes
// (c.id = @paramN AND c.path1 = @p AND c.path2 = @p ...), OR-ed together.
// NOTE(review): splits the flattened partition-key value on '=' to recover the per-path
// components — assumes component values themselves never contain '='; confirm upstream encoding.
private SqlQuerySpec createReadManyQuerySpecMultiHash(
    List<CosmosItemIdentity> itemIdentities,
    PartitionKeyDefinition partitionKeyDefinition) {

    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();

    queryStringBuilder.append("SELECT * FROM c WHERE ( ");
    int paramCount = 0;
    for (int i = 0; i < itemIdentities.size(); i++) {
        CosmosItemIdentity itemIdentity = itemIdentities.get(i);

        PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
        Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
        String pkValueString = (String) pkValue;
        List<List<String>> partitionKeyParams = new ArrayList<>();
        List<String> paths = partitionKeyDefinition.getPaths();
        int pathCount = 0;
        for (String subPartitionKey: pkValueString.split("=")) {
            String pkParamName = "@param" + paramCount;
            partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
            parameters.add(new SqlParameter(pkParamName, subPartitionKey));
            paramCount++;
            pathCount++;
        }

        String idValue = itemIdentity.getId();
        String idParamName = "@param" + paramCount;
        paramCount++;
        parameters.add(new SqlParameter(idParamName, idValue));

        queryStringBuilder.append("(");
        queryStringBuilder.append("c.id = ");
        queryStringBuilder.append(idParamName);
        for (List<String> pkParam: partitionKeyParams) {
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c.");
            // strip the leading '/' from the partition-key path
            queryStringBuilder.append(pkParam.get(0).substring(1));
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParam.get(1));
        }
        queryStringBuilder.append(" )");

        if (i < itemIdentities.size() - 1) {
            queryStringBuilder.append(" OR ");
        }
    }
    queryStringBuilder.append(" )");

    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}

// Turns the partition-key definition paths into a bracketed property selector, e.g.
// /pk -> ["pk"] (concatenated for hierarchical keys).
// NOTE(review): the replace maps '"' to a single backslash, not to an escaped quote (\") —
// looks like an incomplete escape for quote-containing path names; verify intended behavior
// before changing, since existing queries depend on the current output.
private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
    return partitionKeyDefinition.getPaths()
        .stream()
        .map(pathPart -> StringUtils.substring(pathPart, 1))
        .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
        .map(part -> "[\"" + part + "\"]")
        .collect(Collectors.joining());
}

// Executes the per-range readMany queries built by getRangeQueryMap, optionally wrapped with
// the end-to-end latency timeout.
private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DocumentCollection collection,
    Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

    if (rangeQueryMap.isEmpty()) {
        return Flux.empty();
    }

    UUID activityId = randomUuid();

    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this,
        getOperationContextAndListenerTuple(options));

    Flux<?
extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);

    Flux<FeedResponse<T>> feedResponseFlux =
        executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

    RequestOptions requestOptions = options == null? null :
        ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(requestOptions);
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        // wrap the feed with the client-side E2E timeout; the flag lets the pipeline observe cancellation
        return getFeedResponseFluxWithTimeout(
            feedResponseFlux,
            endToEndPolicyConfig,
            options,
            isQueryCancelledOnTimeout,
            diagnosticsFactory);
    }

    return feedResponseFlux;
}

// Serves the single-item buckets of readMany as point reads. A 404/0 (item genuinely absent)
// is converted into an empty page rather than an error so one missing item does not fail the
// whole readMany; every other error propagates.
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {

    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            // only size-1 buckets are point-read; larger buckets are covered by queryForReadMany
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);

                        if (unwrappedThrowable instanceof CosmosException) {

                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();

                            // plain NOTFOUND (substatus UNKNOWN) = item does not exist → tolerate
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND
                                && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(
                                    null, cosmosException));
                            }
                        }

                        return Mono.error(unwrappedThrowable);
                    });
            }

            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {

            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;

            if (cosmosException != null) {
                // tolerated 404 → empty feed page, but keep its diagnostics for aggregation
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(),
                    cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }

            return Mono.just(feedResponse);
        });
}

// Convenience overload: raw query string → SqlQuerySpec.
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    String query,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}

private IDocumentQueryClient
documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl,
                        OperationContextAndListenerTuple operationContextAndListenerTuple) {

    // Adapter exposing this client's caches/policies to the query execution pipeline.
    // When an operation listener is supplied, query requests/responses/errors are reported to it.
    return new IDocumentQueryClient () {

        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }

        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }

        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }

        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }

        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            return RxDocumentClientImpl.this.consistencyLevel;
        }

        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // propagate the correlation id and notify the listener around the query call
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext =
                    operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID,
                    operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);

                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }

        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }

        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {

            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }

        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // not supported through this adapter
            return null;
        }
    };
}

@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink,
    SqlQuerySpec querySpec,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}

@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {

    checkNotNull(collection, "Argument 'collection' must not be null.");
    ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);

    return changeFeedQueryImpl.executeAsync();
}

@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection,
                                                                      ChangeFeedOperationState state,
                                                                      Class<T> classOfT) {
    return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}

// Reads every document in ONE logical partition by turning the request into a single-range
// partition-scan query. When the availability strategy yields >= 2 candidate regions, diagnostics
// are merged on every terminal signal (value/error/cancel) of the hedged flux.
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }

    final CosmosQueryRequestOptions effectiveOptions =
        qryOptAccessor.clone(state.getQueryOptions());

    RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();

    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);

    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);

    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        // single region — no reset needed between attempts
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx));
    } else {
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
    }

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );

    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();

    return collectionObs.flatMap(documentCollectionResourceResponse -> {

        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }

        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();

        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

        IDocumentQueryClient queryClient = documentQueryClientImpl(
            RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));

        // retries collection-resolution when the collection was recreated (stale rid)
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy =
            new InvalidPartitionExceptionRetryPolicy(
                this.collectionCache,
                null,
                resourceLink,
                ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();

                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // pin the query to the single range owning this logical partition
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);

                    PartitionKeyRange range =
                        routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                    return createQueryInternal(
                        diagnosticsFactory,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }

        // hedged execution: fold diagnostics back into the request options on every outcome
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}

@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}

@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink,
                                                                    QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<PartitionKeyRange>>
readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, QueryFeedOperationState state) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, 
documentClientRetryPolicy), documentClientRetryPolicy); } @Override public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); } private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? 
RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (options != null) { request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options)) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } } @Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, 
trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy 
retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, QueryFeedOperationState state) { return queryTriggers(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction 
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions( String collectionLink, String query, QueryFeedOperationState state) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions( String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private 
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, QueryFeedOperationState state) { return queryConflicts(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { 
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. 
user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) { return queryUsers(databaseLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if 
(StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. 
databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy 
retryPolicyInstance) { try { if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys( String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys( String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> 
createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null)); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, QueryFeedOperationState state) { return queryPermissions(userLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing 
an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( QueryFeedOperationState state, ResourceType resourceType, Class<T> klass, String resourceLink) { return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy), retryPolicy); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink, DocumentClientRetryPolicy retryPolicy) { final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions(); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions); int maxPageSize = maxItemCount != null ? 
maxItemCount : -1; assert(resourceType != ResourceType.Document); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> readFeed(request) .map(response -> toFeedResponsePage( response, ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .getItemFactoryMethod(nonNullOptions, klass), klass)); return Paginator .getPaginatedQueryResultAsObservable( nonNullOptions, createRequestFunc, executeFunc, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) { return queryOffers(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, 
ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } @Override public GlobalEndpointManager getGlobalEndpointManager() { return this.globalEndpointManager; } @Override public AddressSelector getAddressSelector() { return new AddressSelector(this.addressResolver, this.configs.getProtocol()); } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. 
* * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.useGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", 
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) { this.storeModel.enableThroughputControl(throughputControlStore); } else { this.gatewayProxy.enableThroughputControl(throughputControlStore); } } this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono); } @Override public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) { return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig); } @Override public ConsistencyLevel getDefaultConsistencyLevelOfAccount() { return this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } /*** * Configure fault injector provider. 
* * @param injectorProvider the fault injector provider. */ @Override public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) { checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null"); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) { this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs); this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs); } this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs); } @Override public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities); } @Override public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities); } @Override public String getMasterKeyOrResourceToken() { return this.masterKeyOrResourceToken; } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, 
null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink, forceRefresh), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal( RxDocumentServiceRequest request, String collectionLink, boolean forceRefresh) { logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, forceRefresh, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); 
return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } /** * Creates a type 4 (pseudo randomly generated) UUID. * <p> * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator. * * @return A randomly generated {@link UUID}. */ public static UUID randomUuid() { return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); } static UUID randomUuid(long msb, long lsb) { msb &= 0xffffffffffff0fffL; msb |= 0x0000000000004000L; lsb &= 0x3fffffffffffffffL; lsb |= 0x8000000000000000L; return new UUID(msb, lsb); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled) { return wrapPointOperationWithAvailabilityStrategy( resourceType, operationType, callback, initialRequestOptions, idempotentWriteRetriesEnabled, this ); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled, DiagnosticsClientContext innerDiagnosticsFactory) { checkNotNull(resourceType, "Argument 'resourceType' must not be null."); checkNotNull(operationType, "Argument 'operationType' must not be null."); checkNotNull(callback, "Argument 'callback' must not be null."); final RequestOptions nonNullRequestOptions = initialRequestOptions != null ? 
initialRequestOptions : new RequestOptions(); checkArgument( resourceType == ResourceType.Document, "This method can only be used for document point operations."); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions); List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, idempotentWriteRetriesEnabled, nonNullRequestOptions); if (orderedApplicableRegionsForSpeculation.size() < 2) { return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>(); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); orderedApplicableRegionsForSpeculation .forEach(region -> { RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions); if (monoList.isEmpty()) { Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions = callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory) .map(NonTransientPointOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientPointOperationResult( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedOptions.setExcludeRegions( getEffectiveExcludedRegionsForHedging( nonNullRequestOptions.getExcludeRegions(), orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono = callback.apply(clonedOptions, 
endToEndPolicyConfig, diagnosticsFactory) .map(NonTransientPointOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientPointOperationResult( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) .delaySubscription(delayForCrossRegionalRetry)); } else { monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { diagnosticsFactory.merge(nonNullRequestOptions); if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { CosmosException cosmosException = Utils.as(innerException, CosmosException.class); diagnosticsFactory.merge(nonNullRequestOptions); return cosmosException; } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( message, innerException ); } 
index++; } } diagnosticsFactory.merge(nonNullRequestOptions); return exception; }) .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions)); } private static boolean isCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); return unwrappedException instanceof CosmosException; } private static boolean isNonTransientCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); if (!(unwrappedException instanceof CosmosException)) { return false; } CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class); return isNonTransientResultForHedging( cosmosException.getStatusCode(), cosmosException.getSubStatusCode()); } private List<String> getEffectiveExcludedRegionsForHedging( List<String> initialExcludedRegions, List<String> applicableRegions, String currentRegion) { List<String> effectiveExcludedRegions = new ArrayList<>(); if (initialExcludedRegions != null) { effectiveExcludedRegions.addAll(initialExcludedRegions); } for (String applicableRegion: applicableRegions) { if (!applicableRegion.equals(currentRegion)) { effectiveExcludedRegions.add(applicableRegion); } } return effectiveExcludedRegions; } private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) { if (statusCode < HttpConstants.StatusCodes.BADREQUEST) { return true; } if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) { return true; } if (statusCode == HttpConstants.StatusCodes.BADREQUEST || statusCode == HttpConstants.StatusCodes.CONFLICT || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) { return true; } if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == 
HttpConstants.SubStatusCodes.UNKNOWN) { return true; } return false; } private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) { if (clientContextOverride != null) { return clientContextOverride; } return this; } /** * Returns the applicable endpoints ordered by preference list if any * @param operationType - the operationT * @return the applicable endpoints ordered by preference list if any */ private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) { if (operationType.isReadOnlyOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions)); } else if (operationType.isWriteOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions)); } return EMPTY_ENDPOINT_LIST; } private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) { if (orderedEffectiveEndpointsList == null) { return EMPTY_ENDPOINT_LIST; } int i = 0; while (i < orderedEffectiveEndpointsList.size()) { if (orderedEffectiveEndpointsList.get(i) == null) { orderedEffectiveEndpointsList.remove(i); } else { i++; } } return orderedEffectiveEndpointsList; } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, RequestOptions options) { return getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, isIdempotentWriteRetriesEnabled, options.getExcludeRegions()); } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, List<String> excludedRegions) { if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) { return EMPTY_REGION_LIST; } if 
(resourceType != ResourceType.Document) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) { return EMPTY_REGION_LIST; } if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) { return EMPTY_REGION_LIST; } List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions); HashSet<String> normalizedExcludedRegions = new HashSet<>(); if (excludedRegions != null) { excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT))); } List<String> orderedRegionsForSpeculation = new ArrayList<>(); endpoints.forEach(uri -> { String regionName = this.globalEndpointManager.getRegionName(uri, operationType); if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) { orderedRegionsForSpeculation.add(regionName); } }); return orderedRegionsForSpeculation; } private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( final ResourceType resourceType, final OperationType operationType, final Supplier<DocumentClientRetryPolicy> retryPolicyFactory, final RxDocumentServiceRequest req, final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation ) { checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null."); checkNotNull(req, "Argument 'req' must not be null."); assert(resourceType == ResourceType.Document); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = this.getEffectiveEndToEndOperationLatencyPolicyConfig( req.requestContext.getEndToEndOperationLatencyPolicyConfig()); List<String> initialExcludedRegions = req.requestContext.getExcludeRegions(); List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, false, 
initialExcludedRegions ); if (orderedApplicableRegionsForSpeculation.size() < 2) { return feedOperation.apply(retryPolicyFactory, req); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>(); orderedApplicableRegionsForSpeculation .forEach(region -> { RxDocumentServiceRequest clonedRequest = req.clone(); if (monoList.isEmpty()) { Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedRequest.requestContext.setExcludeRegions( getEffectiveExcludedRegionsForHedging( initialExcludedRegions, orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) .delaySubscription(delayForCrossRegionalRetry)); } else { 
monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { return Utils.as(innerException, CosmosException.class); } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( message, innerException ); } index++; } } return exception; }); } @FunctionalInterface private interface DocumentPointOperation { Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride); } private static class NonTransientPointOperationResult { private final ResourceResponse<Document> response; private final CosmosException exception; public NonTransientPointOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientPointOperationResult(ResourceResponse<Document> response) { checkNotNull(response, "Argument 'response' must not be null."); this.exception = null; this.response = 
response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public ResourceResponse<Document> getResponse() { return this.response; } } private static class NonTransientFeedOperationResult<T> { private final T response; private final CosmosException exception; public NonTransientFeedOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientFeedOperationResult(T response) { checkNotNull(response, "Argument 'response' must not be null."); this.exception = null; this.response = response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public T getResponse() { return this.response; } } private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext { private final AtomicBoolean isMerged = new AtomicBoolean(false); private final DiagnosticsClientContext inner; private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics; private final boolean shouldCaptureAllFeedDiagnostics; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) { checkNotNull(inner, "Argument 'inner' must not be null."); this.inner = inner; this.createdDiagnostics = new ConcurrentLinkedQueue<>(); this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics; } @Override public DiagnosticsClientConfig getConfig() { return inner.getConfig(); } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = inner.createDiagnostics(); createdDiagnostics.add(diagnostics); mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } @Override public String getUserAgent() { return inner.getUserAgent(); 
} @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return this.mostRecentlyCreatedDiagnostics.get(); } public void merge(RequestOptions requestOptions) { CosmosDiagnosticsContext knownCtx = null; if (requestOptions != null) { CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot(); if (ctxSnapshot != null) { knownCtx = requestOptions.getDiagnosticsContextSnapshot(); } } merge(knownCtx); } public void merge(CosmosDiagnosticsContext knownCtx) { if (!isMerged.compareAndSet(false, true)) { return; } CosmosDiagnosticsContext ctx = null; if (knownCtx != null) { ctx = knownCtx; } else { for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() != null) { ctx = diagnostics.getDiagnosticsContext(); break; } } } if (ctx == null) { return; } for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) { if (this.shouldCaptureAllFeedDiagnostics && diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) { AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics); if (isCaptured != null) { isCaptured.set(true); } } ctxAccessor.addDiagnostics(ctx, diagnostics); } } } public void reset() { this.createdDiagnostics.clear(); this.isMerged.set(false); } } }
Why? I think as long as it is consumed correctly it should be fine. The initial version was indeed not thread-safe — I have changed that part.
// Queries the account metadata from the given service endpoint. Every non-null
// emission refreshes the cached snapshot and clears the last recorded refresh
// error; .single() turns an empty or multi-element upstream into an error.
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
    return this.owner
        .getDatabaseAccountFromEndpoint(serviceEndpoint)
        .doOnNext(account -> {
            if (account != null) {
                // Remember the most recent good account and reset the error marker.
                this.latestDatabaseAccount = account;
                this.setLatestDatabaseRefreshError(null);
            }
            logger.debug("account retrieved: {}", account);
        })
        .single();
}
this.setLatestDatabaseRefreshError(null);
// Fetches the database account metadata from the given service endpoint.
// On a successful (non-null) emission the snapshot is cached in
// latestDatabaseAccount and the last refresh error is cleared; .single()
// fails the Mono if the upstream completes empty or emits more than once.
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
    return this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint)
        .doOnNext(databaseAccount -> {
            if(databaseAccount != null) {
                // Cache the latest good account and reset the refresh-error marker.
                this.latestDatabaseAccount = databaseAccount;
                this.setLatestDatabaseRefreshError(null);
            }
            logger.debug("account retrieved: {}", databaseAccount);
        }).single();
}
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private AtomicReference<String> latestDatabaseRefreshError = new AtomicReference<>(); public void setLatestDatabaseRefreshError(String latestDatabaseRefreshError) { this.latestDatabaseRefreshError.set(latestDatabaseRefreshError); } public String getLatestDatabaseRefreshError() { return latestDatabaseRefreshError.get(); } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); setLatestDatabaseRefreshError("Unable to refresh database account from any location. Exception: " + ex.toString()); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
/**
 * Tracks the Cosmos DB account's regional read/write endpoints and keeps them fresh,
 * both on demand ({@link #refreshLocationAsync}) and via a self-rescheduling background
 * timer. All endpoint state lives in the wrapped {@link LocationCache}.
 *
 * Thread-safety: refresh concurrency is guarded by the {@code isRefreshing} /
 * {@code refreshInBackground} atomics; mutable account/error fields are volatile.
 */
class GlobalEndpointManager implements AutoCloseable {
    private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
    // NOTE(review): "theadFactory" looks like a typo for "threadFactory" — rename candidate.
    private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr");

    // Background refresh interval; config value is in seconds, converted to ms here.
    private final int backgroundRefreshLocationTimeIntervalInMS;
    private final LocationCache locationCache;
    private final URI defaultEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final Duration maxInitializationTime;
    private final DatabaseAccountManagerInternal owner;
    // Prevents overlapping on-demand refreshes (compareAndSet gate in refreshLocationAsync).
    private final AtomicBoolean isRefreshing;
    // True while a background refresh timer is scheduled; avoids double-scheduling.
    private final AtomicBoolean refreshInBackground;
    private final Scheduler scheduler = Schedulers.newSingle(theadFactory);
    private volatile boolean isClosed;
    // NOTE(review): not referenced anywhere in the visible code — confirm before removing.
    private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
    // Last successfully fetched account; deliberately kept when later refreshes fail.
    private volatile DatabaseAccount latestDatabaseAccount;
    // Last refresh failure; cleared (set to null) on a successful account fetch.
    private volatile Throwable latestDatabaseRefreshError;

    public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) {
        this.latestDatabaseRefreshError = latestDatabaseRefreshError;
    }

    public Throwable getLatestDatabaseRefreshError() {
        return latestDatabaseRefreshError;
    }

    public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
        this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
        this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds());
        try {
            this.locationCache = new LocationCache(
                connectionPolicy,
                owner.getServiceEndpoint(),
                configs);
            this.owner = owner;
            this.defaultEndpoint = owner.getServiceEndpoint();
            this.connectionPolicy = connectionPolicy;
            this.isRefreshing = new AtomicBoolean(false);
            this.refreshInBackground = new AtomicBoolean(false);
            this.isClosed = false;
        } catch (Exception e) {
            // Any construction failure is surfaced as an argument problem to the caller.
            throw new IllegalArgumentException(e);
        }
    }

    // Blocking bootstrap: runs the first refresh immediately (initialization == true -> delay 0)
    // and waits up to maxInitializationTime for it.
    public void init() {
        startRefreshLocationTimerAsync(true).block(maxInitializationTime);
    }

    public UnmodifiableList<URI> getReadEndpoints() {
        return this.locationCache.getReadEndpoints();
    }

    public UnmodifiableList<URI> getWriteEndpoints() {
        return this.locationCache.getWriteEndpoints();
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableReadEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableWriteEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableReadEndpoints(excludedRegions);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableWriteEndpoints(excludedRegions);
    }

    public List<URI> getAvailableReadEndpoints() {
        return this.locationCache.getAvailableReadEndpoints();
    }

    public List<URI> getAvailableWriteEndpoints() {
        return this.locationCache.getAvailableWriteEndpoints();
    }

    /**
     * Fetches the database account, trying the default (global) endpoint first and, on
     * failure, each preferred region sequentially; the first success wins.
     *
     * @param defaultEndpoint      global endpoint to try first
     * @param locations            preferred region names used to derive fallback endpoints
     * @param getDatabaseAccountFn endpoint -> account fetch function
     * @return the first successfully retrieved account, or the error if all attempts fail
     */
    public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
        URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
        return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
            e -> {
                logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
                if (locations.isEmpty()) {
                    return Mono.error(e);
                }

                Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
                    .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());

                // concatDelayError: try regions strictly in order, deferring errors until all fail.
                Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
                return res.doOnError(
                    innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
            });
    }

    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request);
        // Record the routed endpoint so fault-injection rules can match on it.
        if (request.faultInjectionRequestContext != null) {
            request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint);
        }
        return serviceEndpoint;
    }

    public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) {
        return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly);
    }

    public URI getDefaultEndpoint() {
        return this.locationCache.getDefaultEndpoint();
    }

    public void markEndpointUnavailableForRead(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for read",endpoint);
        this.locationCache.markEndpointUnavailableForRead(endpoint);; // NOTE(review): stray extra ';' (harmless empty statement)
    }

    public void markEndpointUnavailableForWrite(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for Write",endpoint);
        this.locationCache.markEndpointUnavailableForWrite(endpoint);
    }

    public boolean canUseMultipleWriteLocations() {
        return this.locationCache.canUseMultipleWriteLocations();
    }

    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.locationCache.canUseMultipleWriteLocations(request);
    }

    public void close() {
        this.isClosed = true;
        this.scheduler.dispose();
        logger.debug("GlobalEndpointManager closed.");
    }

    /**
     * Refreshes the location cache. With {@code forceRefresh} the account is re-fetched
     * unconditionally (bypassing the isRefreshing gate); otherwise the refresh is skipped
     * when one is already in flight.
     */
    public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationAsync() invoked");
            if (forceRefresh) {
                Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                    this.defaultEndpoint,
                    new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                    this::getDatabaseAccountAsync);

                return databaseAccountObs.map(dbAccount -> {
                    this.locationCache.onDatabaseAccountRead(dbAccount);
                    return dbAccount;
                }).flatMap(dbAccount -> {
                    return Mono.empty();
                });
            }

            // Only one non-forced refresh at a time; losers of the CAS bail out quietly.
            if (!isRefreshing.compareAndSet(false, true)) {
                logger.debug("in the middle of another refresh. Not invoking a new refresh.");
                return Mono.empty();
            }

            logger.debug("will refresh");
            // On error the gate is re-opened here; on success refreshLocationPrivateAsync resets it.
            return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
        });
    }

    /**
     * This will provide the latest databaseAccount.
     * If due to some reason last databaseAccount update was null,
     * this method will return previous valid value
     * @return DatabaseAccount
     */
    public DatabaseAccount getLatestDatabaseAccount() {
        return this.latestDatabaseAccount;
    }

    public int getPreferredLocationCount() {
        return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0;
    }

    // Core refresh: feeds the account (if any) into the cache, then either re-fetches
    // inline (when a background refresh is not allowed) or schedules the background timer.
    // Responsible for clearing the isRefreshing gate on every exit path.
    private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationPrivateAsync() refreshing locations");

            if (databaseAccount != null) {
                this.locationCache.onDatabaseAccountRead(databaseAccount);
            }

            Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
            if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
                logger.debug("shouldRefreshEndpoints: true");

                if (databaseAccount == null && !canRefreshInBackground.v) {
                    // No account in hand and background refresh disallowed: fetch inline now.
                    logger.debug("shouldRefreshEndpoints: can't be done in background");

                    Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                        this.defaultEndpoint,
                        new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.map(dbAccount -> {
                        this.locationCache.onDatabaseAccountRead(dbAccount);
                        this.isRefreshing.set(false);
                        return dbAccount;
                    }).flatMap(dbAccount -> {
                        if (!this.refreshInBackground.get()) {
                            this.startRefreshLocationTimerAsync();
                        }
                        return Mono.empty();
                    });
                }

                if (!this.refreshInBackground.get()) {
                    this.startRefreshLocationTimerAsync();
                }
                this.isRefreshing.set(false);
                return Mono.empty();
            } else {
                logger.debug("shouldRefreshEndpoints: false, nothing to do.");
                this.isRefreshing.set(false);
                return Mono.empty();
            }
        });
    }

    // Fire-and-forget variant used for periodic background refreshes.
    private void startRefreshLocationTimerAsync() {
        startRefreshLocationTimerAsync(false).subscribe();
    }

    // Schedules a single refresh after the configured delay (0 on initialization) and
    // reschedules itself on failure; the chain ends when the manager is closed.
    private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
        if (this.isClosed) {
            logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
            return Mono.empty();
        }

        logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
        LocalDateTime now = LocalDateTime.now();

        int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;

        this.refreshInBackground.set(true);

        return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL)
            .flatMap(
                t -> {
                    if (this.isClosed) {
                        logger.warn("client already closed");
                        return Mono.empty();
                    }

                    logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
                    Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.flatMap(dbAccount -> {
                        logger.debug("db account retrieved");
                        this.refreshInBackground.set(false);
                        return this.refreshLocationPrivateAsync(dbAccount);
                    });
                }).onErrorResume(ex -> {
                logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
                // Remember the failure for diagnostics, then retry by rescheduling the timer.
                this.setLatestDatabaseRefreshError(ex);
                this.startRefreshLocationTimerAsync();
                return Mono.empty();
            }).subscribeOn(scheduler);
    }

    public boolean isClosed() {
        return this.isClosed;
    }

    public String getRegionName(URI locationEndpoint, OperationType operationType) {
        return this.locationCache.getRegionName(locationEndpoint, operationType);
    }
}
Not really - but with my changes it should be fine.
/**
 * Retrieves the database account from the given endpoint. On every successful emission
 * the account is cached in {@code latestDatabaseAccount} and the sticky refresh-error
 * marker is cleared.
 */
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
    Mono<DatabaseAccount> accountMono = this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint);
    return accountMono
        .doOnNext(account -> {
            if (account != null) {
                // Remember the last good account and reset the error state.
                this.latestDatabaseAccount = account;
                this.setLatestDatabaseRefreshError(null);
            }
            logger.debug("account retrieved: {}", account);
        })
        .single();
}
// Clear the sticky refresh-error marker once an account has been retrieved successfully.
this.setLatestDatabaseRefreshError(null);
/**
 * Fetches the database account from {@code serviceEndpoint}; caches each non-null
 * result and clears the last recorded refresh error.
 */
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
    return this.owner
        .getDatabaseAccountFromEndpoint(serviceEndpoint)
        .doOnNext(retrievedAccount -> {
            if (retrievedAccount != null) {
                // Successful fetch: publish the account and reset the error marker.
                this.latestDatabaseAccount = retrievedAccount;
                this.setLatestDatabaseRefreshError(null);
            }
            logger.debug("account retrieved: {}", retrievedAccount);
        })
        .single();
}
/**
 * Tracks the Cosmos DB account's regional read/write endpoints and keeps them fresh,
 * on demand and via a self-rescheduling background timer. Endpoint state is held in
 * the wrapped {@link LocationCache}. In this variant the last refresh error is stored
 * as a String message inside an {@link AtomicReference}.
 */
class GlobalEndpointManager implements AutoCloseable {
    private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
    // NOTE(review): "theadFactory" looks like a typo for "threadFactory" — rename candidate.
    private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr");

    // Background refresh interval; config value is in seconds, converted to ms here.
    private final int backgroundRefreshLocationTimeIntervalInMS;
    private final LocationCache locationCache;
    private final URI defaultEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final Duration maxInitializationTime;
    private final DatabaseAccountManagerInternal owner;
    // Prevents overlapping on-demand refreshes (compareAndSet gate in refreshLocationAsync).
    private final AtomicBoolean isRefreshing;
    // True while a background refresh timer is scheduled; avoids double-scheduling.
    private final AtomicBoolean refreshInBackground;
    private final Scheduler scheduler = Schedulers.newSingle(theadFactory);
    private volatile boolean isClosed;
    // NOTE(review): not referenced anywhere in the visible code — confirm before removing.
    private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
    // Last successfully fetched account; deliberately kept when later refreshes fail.
    private volatile DatabaseAccount latestDatabaseAccount;
    // Last refresh failure message; cleared (set to null) on a successful account fetch.
    private AtomicReference<String> latestDatabaseRefreshError = new AtomicReference<>();

    public void setLatestDatabaseRefreshError(String latestDatabaseRefreshError) {
        this.latestDatabaseRefreshError.set(latestDatabaseRefreshError);
    }

    public String getLatestDatabaseRefreshError() {
        return latestDatabaseRefreshError.get();
    }

    public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
        this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
        this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds());
        try {
            this.locationCache = new LocationCache(
                connectionPolicy,
                owner.getServiceEndpoint(),
                configs);
            this.owner = owner;
            this.defaultEndpoint = owner.getServiceEndpoint();
            this.connectionPolicy = connectionPolicy;
            this.isRefreshing = new AtomicBoolean(false);
            this.refreshInBackground = new AtomicBoolean(false);
            this.isClosed = false;
        } catch (Exception e) {
            // Any construction failure is surfaced as an argument problem to the caller.
            throw new IllegalArgumentException(e);
        }
    }

    // Blocking bootstrap: runs the first refresh immediately (initialization == true -> delay 0)
    // and waits up to maxInitializationTime for it.
    public void init() {
        startRefreshLocationTimerAsync(true).block(maxInitializationTime);
    }

    public UnmodifiableList<URI> getReadEndpoints() {
        return this.locationCache.getReadEndpoints();
    }

    public UnmodifiableList<URI> getWriteEndpoints() {
        return this.locationCache.getWriteEndpoints();
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableReadEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableWriteEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableReadEndpoints(excludedRegions);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableWriteEndpoints(excludedRegions);
    }

    public List<URI> getAvailableReadEndpoints() {
        return this.locationCache.getAvailableReadEndpoints();
    }

    public List<URI> getAvailableWriteEndpoints() {
        return this.locationCache.getAvailableWriteEndpoints();
    }

    /**
     * Fetches the database account, trying the default (global) endpoint first and, on
     * failure, each preferred region sequentially; the first success wins.
     *
     * @param defaultEndpoint      global endpoint to try first
     * @param locations            preferred region names used to derive fallback endpoints
     * @param getDatabaseAccountFn endpoint -> account fetch function
     * @return the first successfully retrieved account, or the error if all attempts fail
     */
    public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
        URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
        return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
            e -> {
                logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
                if (locations.isEmpty()) {
                    return Mono.error(e);
                }

                Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
                    .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());

                // concatDelayError: try regions strictly in order, deferring errors until all fail.
                Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
                return res.doOnError(
                    innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
            });
    }

    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request);
        // Record the routed endpoint so fault-injection rules can match on it.
        if (request.faultInjectionRequestContext != null) {
            request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint);
        }
        return serviceEndpoint;
    }

    public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) {
        return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly);
    }

    public URI getDefaultEndpoint() {
        return this.locationCache.getDefaultEndpoint();
    }

    public void markEndpointUnavailableForRead(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for read",endpoint);
        this.locationCache.markEndpointUnavailableForRead(endpoint);; // NOTE(review): stray extra ';' (harmless empty statement)
    }

    public void markEndpointUnavailableForWrite(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for Write",endpoint);
        this.locationCache.markEndpointUnavailableForWrite(endpoint);
    }

    public boolean canUseMultipleWriteLocations() {
        return this.locationCache.canUseMultipleWriteLocations();
    }

    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.locationCache.canUseMultipleWriteLocations(request);
    }

    public void close() {
        this.isClosed = true;
        this.scheduler.dispose();
        logger.debug("GlobalEndpointManager closed.");
    }

    /**
     * Refreshes the location cache. With {@code forceRefresh} the account is re-fetched
     * unconditionally (bypassing the isRefreshing gate); otherwise the refresh is skipped
     * when one is already in flight.
     */
    public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationAsync() invoked");
            if (forceRefresh) {
                Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                    this.defaultEndpoint,
                    new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                    this::getDatabaseAccountAsync);

                return databaseAccountObs.map(dbAccount -> {
                    this.locationCache.onDatabaseAccountRead(dbAccount);
                    return dbAccount;
                }).flatMap(dbAccount -> {
                    return Mono.empty();
                });
            }

            // Only one non-forced refresh at a time; losers of the CAS bail out quietly.
            if (!isRefreshing.compareAndSet(false, true)) {
                logger.debug("in the middle of another refresh. Not invoking a new refresh.");
                return Mono.empty();
            }

            logger.debug("will refresh");
            // On error the gate is re-opened here; on success refreshLocationPrivateAsync resets it.
            return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
        });
    }

    /**
     * This will provide the latest databaseAccount.
     * If due to some reason last databaseAccount update was null,
     * this method will return previous valid value
     * @return DatabaseAccount
     */
    public DatabaseAccount getLatestDatabaseAccount() {
        return this.latestDatabaseAccount;
    }

    public int getPreferredLocationCount() {
        return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0;
    }

    // Core refresh: feeds the account (if any) into the cache, then either re-fetches
    // inline (when a background refresh is not allowed) or schedules the background timer.
    // Responsible for clearing the isRefreshing gate on every exit path.
    private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationPrivateAsync() refreshing locations");

            if (databaseAccount != null) {
                this.locationCache.onDatabaseAccountRead(databaseAccount);
            }

            Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
            if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
                logger.debug("shouldRefreshEndpoints: true");

                if (databaseAccount == null && !canRefreshInBackground.v) {
                    // No account in hand and background refresh disallowed: fetch inline now.
                    logger.debug("shouldRefreshEndpoints: can't be done in background");

                    Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                        this.defaultEndpoint,
                        new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.map(dbAccount -> {
                        this.locationCache.onDatabaseAccountRead(dbAccount);
                        this.isRefreshing.set(false);
                        return dbAccount;
                    }).flatMap(dbAccount -> {
                        if (!this.refreshInBackground.get()) {
                            this.startRefreshLocationTimerAsync();
                        }
                        return Mono.empty();
                    });
                }

                if (!this.refreshInBackground.get()) {
                    this.startRefreshLocationTimerAsync();
                }
                this.isRefreshing.set(false);
                return Mono.empty();
            } else {
                logger.debug("shouldRefreshEndpoints: false, nothing to do.");
                this.isRefreshing.set(false);
                return Mono.empty();
            }
        });
    }

    // Fire-and-forget variant used for periodic background refreshes.
    private void startRefreshLocationTimerAsync() {
        startRefreshLocationTimerAsync(false).subscribe();
    }

    // Schedules a single refresh after the configured delay (0 on initialization) and
    // reschedules itself on failure; the chain ends when the manager is closed.
    private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
        if (this.isClosed) {
            logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
            return Mono.empty();
        }

        logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
        LocalDateTime now = LocalDateTime.now();

        int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;

        this.refreshInBackground.set(true);

        return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL)
            .flatMap(
                t -> {
                    if (this.isClosed) {
                        logger.warn("client already closed");
                        return Mono.empty();
                    }

                    logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
                    Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.flatMap(dbAccount -> {
                        logger.debug("db account retrieved");
                        this.refreshInBackground.set(false);
                        return this.refreshLocationPrivateAsync(dbAccount);
                    });
                }).onErrorResume(ex -> {
                logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
                // Record the failure message for diagnostics, then retry by rescheduling the timer.
                setLatestDatabaseRefreshError("Unable to refresh database account from any location. Exception: " + ex.toString());
                this.startRefreshLocationTimerAsync();
                return Mono.empty();
            }).subscribeOn(scheduler);
    }

    public boolean isClosed() {
        return this.isClosed;
    }

    public String getRegionName(URI locationEndpoint, OperationType operationType) {
        return this.locationCache.getRegionName(locationEndpoint, operationType);
    }
}
/**
 * Maintains the set of regional read/write endpoints for a Cosmos DB account and keeps
 * them up to date, both on demand and through a self-rescheduling background timer.
 * This variant records the last refresh failure as a {@code volatile Throwable}.
 */
class GlobalEndpointManager implements AutoCloseable {
    private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class);
    // NOTE(review): "theadFactory" looks like a typo for "threadFactory" — rename candidate.
    private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr");

    // Background refresh interval; config value is in seconds, converted to ms here.
    private final int backgroundRefreshLocationTimeIntervalInMS;
    private final LocationCache locationCache;
    private final URI defaultEndpoint;
    private final ConnectionPolicy connectionPolicy;
    private final Duration maxInitializationTime;
    private final DatabaseAccountManagerInternal owner;
    // Gate preventing overlapping on-demand refreshes.
    private final AtomicBoolean isRefreshing;
    // True while a background refresh timer is scheduled.
    private final AtomicBoolean refreshInBackground;
    private final Scheduler scheduler = Schedulers.newSingle(theadFactory);
    private volatile boolean isClosed;
    // NOTE(review): not referenced anywhere in the visible code — confirm before removing.
    private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true);
    // Last successfully fetched account; retained across failed refreshes.
    private volatile DatabaseAccount latestDatabaseAccount;
    // Last refresh failure; cleared on a successful account fetch.
    private volatile Throwable latestDatabaseRefreshError;

    public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) {
        this.latestDatabaseRefreshError = latestDatabaseRefreshError;
    }

    public Throwable getLatestDatabaseRefreshError() {
        return latestDatabaseRefreshError;
    }

    public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) {
        this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000;
        this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds());
        try {
            this.locationCache = new LocationCache(
                connectionPolicy,
                owner.getServiceEndpoint(),
                configs);
            this.owner = owner;
            this.defaultEndpoint = owner.getServiceEndpoint();
            this.connectionPolicy = connectionPolicy;
            this.isRefreshing = new AtomicBoolean(false);
            this.refreshInBackground = new AtomicBoolean(false);
            this.isClosed = false;
        } catch (Exception e) {
            // Construction failures surface as argument problems to the caller.
            throw new IllegalArgumentException(e);
        }
    }

    // Blocking bootstrap: first refresh runs immediately (delay 0) and the caller
    // waits up to maxInitializationTime for it to finish.
    public void init() {
        startRefreshLocationTimerAsync(true).block(maxInitializationTime);
    }

    public UnmodifiableList<URI> getReadEndpoints() {
        return this.locationCache.getReadEndpoints();
    }

    public UnmodifiableList<URI> getWriteEndpoints() {
        return this.locationCache.getWriteEndpoints();
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableReadEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) {
        return this.locationCache.getApplicableWriteEndpoints(request);
    }

    public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableReadEndpoints(excludedRegions);
    }

    public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) {
        return this.locationCache.getApplicableWriteEndpoints(excludedRegions);
    }

    public List<URI> getAvailableReadEndpoints() {
        return this.locationCache.getAvailableReadEndpoints();
    }

    public List<URI> getAvailableWriteEndpoints() {
        return this.locationCache.getAvailableWriteEndpoints();
    }

    /**
     * Fetches the database account, trying the default (global) endpoint first, then
     * each preferred region in order; the first successful fetch wins.
     *
     * @param defaultEndpoint      global endpoint to try first
     * @param locations            preferred region names for fallback endpoints
     * @param getDatabaseAccountFn endpoint -> account fetch function
     * @return the first retrieved account, or the error when every attempt fails
     */
    public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync(
        URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) {
        return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume(
            e -> {
                logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage());
                if (locations.isEmpty()) {
                    return Mono.error(e);
                }

                Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size())
                    .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux());

                // concatDelayError keeps trying subsequent regions before surfacing errors.
                Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single();
                return res.doOnError(
                    innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage()));
            });
    }

    public URI resolveServiceEndpoint(RxDocumentServiceRequest request) {
        URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request);
        // Expose the routed endpoint to the fault-injection machinery.
        if (request.faultInjectionRequestContext != null) {
            request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint);
        }
        return serviceEndpoint;
    }

    public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) {
        return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly);
    }

    public URI getDefaultEndpoint() {
        return this.locationCache.getDefaultEndpoint();
    }

    public void markEndpointUnavailableForRead(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for read",endpoint);
        this.locationCache.markEndpointUnavailableForRead(endpoint);; // NOTE(review): stray extra ';' (harmless empty statement)
    }

    public void markEndpointUnavailableForWrite(URI endpoint) {
        logger.debug("Marking endpoint {} unavailable for Write",endpoint);
        this.locationCache.markEndpointUnavailableForWrite(endpoint);
    }

    public boolean canUseMultipleWriteLocations() {
        return this.locationCache.canUseMultipleWriteLocations();
    }

    public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) {
        return this.locationCache.canUseMultipleWriteLocations(request);
    }

    public void close() {
        this.isClosed = true;
        this.scheduler.dispose();
        logger.debug("GlobalEndpointManager closed.");
    }

    /**
     * Refreshes the location cache. A forced refresh re-fetches the account
     * unconditionally; a non-forced refresh is skipped when one is already running.
     */
    public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationAsync() invoked");
            if (forceRefresh) {
                Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                    this.defaultEndpoint,
                    new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                    this::getDatabaseAccountAsync);

                return databaseAccountObs.map(dbAccount -> {
                    this.locationCache.onDatabaseAccountRead(dbAccount);
                    return dbAccount;
                }).flatMap(dbAccount -> {
                    return Mono.empty();
                });
            }

            // CAS gate: only one non-forced refresh at a time.
            if (!isRefreshing.compareAndSet(false, true)) {
                logger.debug("in the middle of another refresh. Not invoking a new refresh.");
                return Mono.empty();
            }

            logger.debug("will refresh");
            // Errors re-open the gate here; success paths reset it inside refreshLocationPrivateAsync.
            return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false));
        });
    }

    /**
     * This will provide the latest databaseAccount.
     * If due to some reason last databaseAccount update was null,
     * this method will return previous valid value
     * @return DatabaseAccount
     */
    public DatabaseAccount getLatestDatabaseAccount() {
        return this.latestDatabaseAccount;
    }

    public int getPreferredLocationCount() {
        return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0;
    }

    // Core refresh: pushes the account (if any) into the cache, then either fetches
    // inline (when background refresh is not allowed) or schedules the background timer.
    // Must clear the isRefreshing gate on every exit path.
    private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) {
        return Mono.defer(() -> {
            logger.debug("refreshLocationPrivateAsync() refreshing locations");

            if (databaseAccount != null) {
                this.locationCache.onDatabaseAccountRead(databaseAccount);
            }

            Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>();
            if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) {
                logger.debug("shouldRefreshEndpoints: true");

                if (databaseAccount == null && !canRefreshInBackground.v) {
                    // No account available and background refresh disallowed: fetch inline now.
                    logger.debug("shouldRefreshEndpoints: can't be done in background");

                    Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync(
                        this.defaultEndpoint,
                        new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.map(dbAccount -> {
                        this.locationCache.onDatabaseAccountRead(dbAccount);
                        this.isRefreshing.set(false);
                        return dbAccount;
                    }).flatMap(dbAccount -> {
                        if (!this.refreshInBackground.get()) {
                            this.startRefreshLocationTimerAsync();
                        }
                        return Mono.empty();
                    });
                }

                if (!this.refreshInBackground.get()) {
                    this.startRefreshLocationTimerAsync();
                }
                this.isRefreshing.set(false);
                return Mono.empty();
            } else {
                logger.debug("shouldRefreshEndpoints: false, nothing to do.");
                this.isRefreshing.set(false);
                return Mono.empty();
            }
        });
    }

    // Fire-and-forget variant used by the periodic background refresh.
    private void startRefreshLocationTimerAsync() {
        startRefreshLocationTimerAsync(false).subscribe();
    }

    // Schedules one refresh after the configured delay (0 during initialization) and
    // reschedules itself on failure; the chain stops once the manager is closed.
    private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) {
        if (this.isClosed) {
            logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed");
            return Mono.empty();
        }

        logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS);
        LocalDateTime now = LocalDateTime.now();

        int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS;

        this.refreshInBackground.set(true);

        return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL)
            .flatMap(
                t -> {
                    if (this.isClosed) {
                        logger.warn("client already closed");
                        return Mono.empty();
                    }

                    logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now);
                    Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()),
                        this::getDatabaseAccountAsync);

                    return databaseAccountObs.flatMap(dbAccount -> {
                        logger.debug("db account retrieved");
                        this.refreshInBackground.set(false);
                        return this.refreshLocationPrivateAsync(dbAccount);
                    });
                }).onErrorResume(ex -> {
                logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex);
                // Record the failure for diagnostics, then retry by rescheduling the timer.
                this.setLatestDatabaseRefreshError(ex);
                this.startRefreshLocationTimerAsync();
                return Mono.empty();
            }).subscribeOn(scheduler);
    }

    public boolean isClosed() {
        return this.isClosed;
    }

    public String getRegionName(URI locationEndpoint, OperationType operationType) {
        return this.locationCache.getRegionName(locationEndpoint, operationType);
    }
}
In RxDocumentClientImpl - the important part is that the throwable, both when logging and when wrapping in a RuntimeException, should be passed in as the cause - doing that with null is fine. For consistency you also have to use a snapshot - otherwise the value might have changed between logging and throwing the inner exception.
/**
 * Resolves the database account from the supplied endpoint. Each successful, non-null
 * emission is cached and resets the last-refresh-error state.
 */
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
    return this.owner
        .getDatabaseAccountFromEndpoint(serviceEndpoint)
        .doOnNext(fetched -> {
            if (fetched != null) {
                // Cache the freshly fetched account; a success wipes the error marker.
                this.latestDatabaseAccount = fetched;
                this.setLatestDatabaseRefreshError(null);
            }
            logger.debug("account retrieved: {}", fetched);
        })
        .single();
}
// Reset the last-refresh-error state after a successful database account fetch.
this.setLatestDatabaseRefreshError(null);
/**
 * Looks up the database account at {@code serviceEndpoint}; on success the account is
 * stored in {@code latestDatabaseAccount} and the refresh-error marker is cleared.
 */
private Mono<DatabaseAccount> getDatabaseAccountAsync(URI serviceEndpoint) {
    Mono<DatabaseAccount> fetch = this.owner.getDatabaseAccountFromEndpoint(serviceEndpoint);
    return fetch
        .doOnNext(result -> {
            if (result != null) {
                // Publish the latest account and clear any previously recorded error.
                this.latestDatabaseAccount = result;
                this.setLatestDatabaseRefreshError(null);
            }
            logger.debug("account retrieved: {}", result);
        })
        .single();
}
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private AtomicReference<String> latestDatabaseRefreshError = new AtomicReference<>(); public void setLatestDatabaseRefreshError(String latestDatabaseRefreshError) { this.latestDatabaseRefreshError.set(latestDatabaseRefreshError); } public String getLatestDatabaseRefreshError() { return latestDatabaseRefreshError.get(); } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); setLatestDatabaseRefreshError("Unable to refresh database account from any location. Exception: " + ex.toString()); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
class GlobalEndpointManager implements AutoCloseable { private static final Logger logger = LoggerFactory.getLogger(GlobalEndpointManager.class); private static final CosmosDaemonThreadFactory theadFactory = new CosmosDaemonThreadFactory("cosmos-global-endpoint-mgr"); private final int backgroundRefreshLocationTimeIntervalInMS; private final LocationCache locationCache; private final URI defaultEndpoint; private final ConnectionPolicy connectionPolicy; private final Duration maxInitializationTime; private final DatabaseAccountManagerInternal owner; private final AtomicBoolean isRefreshing; private final AtomicBoolean refreshInBackground; private final Scheduler scheduler = Schedulers.newSingle(theadFactory); private volatile boolean isClosed; private AtomicBoolean firstTimeDatabaseAccountInitialization = new AtomicBoolean(true); private volatile DatabaseAccount latestDatabaseAccount; private volatile Throwable latestDatabaseRefreshError; public void setLatestDatabaseRefreshError(Throwable latestDatabaseRefreshError) { this.latestDatabaseRefreshError = latestDatabaseRefreshError; } public Throwable getLatestDatabaseRefreshError() { return latestDatabaseRefreshError; } public GlobalEndpointManager(DatabaseAccountManagerInternal owner, ConnectionPolicy connectionPolicy, Configs configs) { this.backgroundRefreshLocationTimeIntervalInMS = configs.getUnavailableLocationsExpirationTimeInSeconds() * 1000; this.maxInitializationTime = Duration.ofSeconds(configs.getGlobalEndpointManagerMaxInitializationTimeInSeconds()); try { this.locationCache = new LocationCache( connectionPolicy, owner.getServiceEndpoint(), configs); this.owner = owner; this.defaultEndpoint = owner.getServiceEndpoint(); this.connectionPolicy = connectionPolicy; this.isRefreshing = new AtomicBoolean(false); this.refreshInBackground = new AtomicBoolean(false); this.isClosed = false; } catch (Exception e) { throw new IllegalArgumentException(e); } } public void init() { 
startRefreshLocationTimerAsync(true).block(maxInitializationTime); } public UnmodifiableList<URI> getReadEndpoints() { return this.locationCache.getReadEndpoints(); } public UnmodifiableList<URI> getWriteEndpoints() { return this.locationCache.getWriteEndpoints(); } public UnmodifiableList<URI> getApplicableReadEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableReadEndpoints(request); } public UnmodifiableList<URI> getApplicableWriteEndpoints(RxDocumentServiceRequest request) { return this.locationCache.getApplicableWriteEndpoints(request); } public UnmodifiableList<URI> getApplicableReadEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableReadEndpoints(excludedRegions); } public UnmodifiableList<URI> getApplicableWriteEndpoints(List<String> excludedRegions) { return this.locationCache.getApplicableWriteEndpoints(excludedRegions); } public List<URI> getAvailableReadEndpoints() { return this.locationCache.getAvailableReadEndpoints(); } public List<URI> getAvailableWriteEndpoints() { return this.locationCache.getAvailableWriteEndpoints(); } public static Mono<DatabaseAccount> getDatabaseAccountFromAnyLocationsAsync( URI defaultEndpoint, List<String> locations, Function<URI, Mono<DatabaseAccount>> getDatabaseAccountFn) { return getDatabaseAccountFn.apply(defaultEndpoint).onErrorResume( e -> { logger.error("Fail to reach global gateway [{}], [{}]", defaultEndpoint, e.getMessage()); if (locations.isEmpty()) { return Mono.error(e); } Flux<Flux<DatabaseAccount>> obs = Flux.range(0, locations.size()) .map(index -> getDatabaseAccountFn.apply(LocationHelper.getLocationEndpoint(defaultEndpoint, locations.get(index))).flux()); Mono<DatabaseAccount> res = Flux.concatDelayError(obs).take(1).single(); return res.doOnError( innerE -> logger.error("Fail to reach location any of locations {} {}", String.join(",", locations), innerE.getMessage())); }); } public URI resolveServiceEndpoint(RxDocumentServiceRequest request) { 
URI serviceEndpoint = this.locationCache.resolveServiceEndpoint(request); if (request.faultInjectionRequestContext != null) { request.faultInjectionRequestContext.setLocationEndpointToRoute(serviceEndpoint); } return serviceEndpoint; } public URI resolveFaultInjectionServiceEndpoint(String region, boolean writeOnly) { return this.locationCache.resolveFaultInjectionEndpoint(region, writeOnly); } public URI getDefaultEndpoint() { return this.locationCache.getDefaultEndpoint(); } public void markEndpointUnavailableForRead(URI endpoint) { logger.debug("Marking endpoint {} unavailable for read",endpoint); this.locationCache.markEndpointUnavailableForRead(endpoint);; } public void markEndpointUnavailableForWrite(URI endpoint) { logger.debug("Marking endpoint {} unavailable for Write",endpoint); this.locationCache.markEndpointUnavailableForWrite(endpoint); } public boolean canUseMultipleWriteLocations() { return this.locationCache.canUseMultipleWriteLocations(); } public boolean canUseMultipleWriteLocations(RxDocumentServiceRequest request) { return this.locationCache.canUseMultipleWriteLocations(request); } public void close() { this.isClosed = true; this.scheduler.dispose(); logger.debug("GlobalEndpointManager closed."); } public Mono<Void> refreshLocationAsync(DatabaseAccount databaseAccount, boolean forceRefresh) { return Mono.defer(() -> { logger.debug("refreshLocationAsync() invoked"); if (forceRefresh) { Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); return dbAccount; }).flatMap(dbAccount -> { return Mono.empty(); }); } if (!isRefreshing.compareAndSet(false, true)) { logger.debug("in the middle of another refresh. 
Not invoking a new refresh."); return Mono.empty(); } logger.debug("will refresh"); return this.refreshLocationPrivateAsync(databaseAccount).doOnError(e -> this.isRefreshing.set(false)); }); } /** * This will provide the latest databaseAccount. * If due to some reason last databaseAccount update was null, * this method will return previous valid value * @return DatabaseAccount */ public DatabaseAccount getLatestDatabaseAccount() { return this.latestDatabaseAccount; } public int getPreferredLocationCount() { return this.connectionPolicy.getPreferredRegions() != null ? this.connectionPolicy.getPreferredRegions().size() : 0; } private Mono<Void> refreshLocationPrivateAsync(DatabaseAccount databaseAccount) { return Mono.defer(() -> { logger.debug("refreshLocationPrivateAsync() refreshing locations"); if (databaseAccount != null) { this.locationCache.onDatabaseAccountRead(databaseAccount); } Utils.ValueHolder<Boolean> canRefreshInBackground = new Utils.ValueHolder<>(); if (this.locationCache.shouldRefreshEndpoints(canRefreshInBackground)) { logger.debug("shouldRefreshEndpoints: true"); if (databaseAccount == null && !canRefreshInBackground.v) { logger.debug("shouldRefreshEndpoints: can't be done in background"); Mono<DatabaseAccount> databaseAccountObs = getDatabaseAccountFromAnyLocationsAsync( this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.map(dbAccount -> { this.locationCache.onDatabaseAccountRead(dbAccount); this.isRefreshing.set(false); return dbAccount; }).flatMap(dbAccount -> { if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } return Mono.empty(); }); } if (!this.refreshInBackground.get()) { this.startRefreshLocationTimerAsync(); } this.isRefreshing.set(false); return Mono.empty(); } else { logger.debug("shouldRefreshEndpoints: false, nothing to do."); this.isRefreshing.set(false); return Mono.empty(); } }); } private void 
startRefreshLocationTimerAsync() { startRefreshLocationTimerAsync(false).subscribe(); } private Mono<Void> startRefreshLocationTimerAsync(boolean initialization) { if (this.isClosed) { logger.debug("startRefreshLocationTimerAsync: nothing to do, it is closed"); return Mono.empty(); } logger.debug("registering a refresh in [{}] ms", this.backgroundRefreshLocationTimeIntervalInMS); LocalDateTime now = LocalDateTime.now(); int delayInMillis = initialization ? 0: this.backgroundRefreshLocationTimeIntervalInMS; this.refreshInBackground.set(true); return Mono.delay(Duration.ofMillis(delayInMillis), CosmosSchedulers.COSMOS_PARALLEL) .flatMap( t -> { if (this.isClosed) { logger.warn("client already closed"); return Mono.empty(); } logger.debug("startRefreshLocationTimerAsync() - Invoking refresh, I was registered on [{}]", now); Mono<DatabaseAccount> databaseAccountObs = GlobalEndpointManager.getDatabaseAccountFromAnyLocationsAsync(this.defaultEndpoint, new ArrayList<>(this.connectionPolicy.getPreferredRegions()), this::getDatabaseAccountAsync); return databaseAccountObs.flatMap(dbAccount -> { logger.debug("db account retrieved"); this.refreshInBackground.set(false); return this.refreshLocationPrivateAsync(dbAccount); }); }).onErrorResume(ex -> { logger.error("startRefreshLocationTimerAsync() - Unable to refresh database account from any location. Exception: {}", ex.toString(), ex); this.setLatestDatabaseRefreshError(ex); this.startRefreshLocationTimerAsync(); return Mono.empty(); }).subscribeOn(scheduler); } public boolean isClosed() { return this.isClosed; } public String getRegionName(URI locationEndpoint, OperationType operationType) { return this.locationCache.getRegionName(locationEndpoint, operationType); } }
Shouldn't we set the inner exception as `databaseRefreshErrorSnapshot`?
private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError(); if (databaseRefreshErrorSnapshot != null) { logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot ); throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: this.globalEndpointManager.getLatestDatabaseRefreshError()); } else { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: } } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }
this.globalEndpointManager.getLatestDatabaseRefreshError());
private void initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError(); if (databaseRefreshErrorSnapshot != null) { logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot ); throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot); } else { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: } } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private final static List<String> EMPTY_REGION_LIST = Collections.emptyList(); private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor(); private final static ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final static ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor = ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor(); private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>(); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer 
itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxGatewayStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private final ApiType apiType; private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. 
Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final String clientCorrelationId; private final SessionRetryOptions sessionRetryOptions; private final boolean sessionCapturingOverrideEnabled; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, 
cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String 
clientCorrelationId,
                     CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                     SessionRetryOptions sessionRetryOptions,
                     CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    // Delegate all shared initialization to the main constructor, then layer
    // resource-token (permission-feed) auth state on top.
    this(
        serviceEndpoint,
        masterKeyOrResourceToken,
        connectionPolicy,
        consistencyLevel,
        configs,
        credential,
        tokenCredential,
        sessionCapturingOverrideEnabled,
        connectionSharingAcrossClientsEnabled,
        contentResponseOnWriteEnabled,
        metadataCachesSnapshot,
        apiType,
        clientTelemetryConfig,
        clientCorrelationId,
        cosmosEndToEndOperationLatencyPolicyConfig,
        sessionRetryOptions,
        containerProactiveInitConfig);

    if (permissionFeed != null && permissionFeed.size() > 0) {
        // Build a map from resourceId/full-name -> (partition key, token) pairs
        // so per-resource tokens can be looked up at request time.
        this.resourceTokensMap = new HashMap<>();
        for (Permission permission : permissionFeed) {
            String[] segments = StringUtils.split(permission.getResourceLink(),
                Constants.Properties.PATH_SEPARATOR.charAt(0));

            if (segments.length == 0) {
                throw new IllegalArgumentException("resourceLink");
            }

            List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null;
            PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false);
            if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) {
                throw new IllegalArgumentException(permission.getResourceLink());
            }

            partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName);
            if (partitionKeyAndResourceTokenPairs == null) {
                partitionKeyAndResourceTokenPairs = new ArrayList<>();
                this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs);
            }

            PartitionKey partitionKey = permission.getResourcePartitionKey();
            partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair(
                partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty,
                permission.getToken()));
            logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]",
                pathInfo.resourceIdOrFullName,
                partitionKey != null ? partitionKey.toString() : null, permission.getToken());
        }

        if(this.resourceTokensMap.isEmpty()) {
            throw new IllegalArgumentException("permissionFeed");
        }

        // Remember the first resource token as a fallback credential.
        String firstToken = permissionFeed.get(0).getToken();
        if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) {
            this.firstResourceTokenFromPermissionFeed = firstToken;
        }
    }
}

/**
 * Main constructor: wires up diagnostics config, authorization (key, resource
 * token, or AAD), connection policy and session-capturing behavior. Network
 * facing caches/proxies are created later in {@code init(...)}.
 */
RxDocumentClientImpl(URI serviceEndpoint,
                     String masterKeyOrResourceToken,
                     ConnectionPolicy connectionPolicy,
                     ConsistencyLevel consistencyLevel,
                     Configs configs,
                     AzureKeyCredential credential,
                     TokenCredential tokenCredential,
                     boolean sessionCapturingOverrideEnabled,
                     boolean connectionSharingAcrossClientsEnabled,
                     boolean contentResponseOnWriteEnabled,
                     CosmosClientMetadataCachesSnapshot metadataCachesSnapshot,
                     ApiType apiType,
                     CosmosClientTelemetryConfig clientTelemetryConfig,
                     String clientCorrelationId,
                     CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig,
                     SessionRetryOptions sessionRetryOptions,
                     CosmosContainerProactiveInitConfig containerProactiveInitConfig) {
    assert(clientTelemetryConfig != null);
    Boolean clientTelemetryEnabled = ImplementationBridgeHelpers
        .CosmosClientTelemetryConfigHelper
        .getCosmosClientTelemetryConfigAccessor()
        .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig);
    assert(clientTelemetryEnabled != null);
    activeClientsCnt.incrementAndGet();
    this.clientId = clientIdGenerator.incrementAndGet();
    // Default correlation id is the zero-padded client id when none supplied.
    this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ?
        String.format("%05d",this.clientId): clientCorrelationId;
    clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1);
    this.diagnosticsClientConfig = new DiagnosticsClientConfig();
    this.diagnosticsClientConfig.withClientId(this.clientId);
    this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt);
    this.diagnosticsClientConfig.withClientMap(clientMap);
    this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled);
    this.diagnosticsClientConfig.withConsistency(consistencyLevel);
    this.throughputControlEnabled = new AtomicBoolean(false);
    this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig;
    this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig);
    this.sessionRetryOptions = sessionRetryOptions;

    logger.info(
        "Initializing DocumentClient [{}] with"
            + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]",
        this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol());

    try {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        this.configs = configs;
        this.masterKeyOrResourceToken = masterKeyOrResourceToken;
        this.serviceEndpoint = serviceEndpoint;
        this.credential = credential;
        this.tokenCredential = tokenCredential;
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        this.authorizationTokenType = AuthorizationTokenType.Invalid;

        // Credential precedence: explicit key credential > resource token >
        // raw master key > AAD token credential.
        if (this.credential != null) {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.authorizationTokenProvider = null;
            hasAuthKeyResourceToken = true;
            this.authorizationTokenType = AuthorizationTokenType.ResourceToken;
        } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) {
            this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken);
            hasAuthKeyResourceToken = false;
            this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey;
            this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential);
        } else {
            hasAuthKeyResourceToken = false;
            this.authorizationTokenProvider = null;
            if (tokenCredential != null) {
                // NOTE(review): the scope string below appears truncated in this
                // copy of the file (unterminated literal) — presumably the AAD
                // scope was built from the endpoint scheme/host; verify against
                // upstream before relying on this line.
                this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": };
                this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential
                    .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes)));
                this.authorizationTokenType = AuthorizationTokenType.AadToken;
            }
        }

        if (connectionPolicy != null) {
            this.connectionPolicy = connectionPolicy;
        } else {
            // Default to direct-mode connectivity when no policy is supplied.
            this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        }

        this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode());
        this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy);
        this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled());
        this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled());
        this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions());
        this.diagnosticsClientConfig.withMachineId(tempMachineId);
        this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig);
        this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions);

        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        // Session capturing is only needed for SESSION consistency unless
        // explicitly overridden by the caller.
        boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer = new
SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing);
        this.consistencyLevel = consistencyLevel;

        this.userAgentContainer = new UserAgentContainer();

        String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix();
        if (userAgentSuffix != null && userAgentSuffix.length() > 0) {
            userAgentContainer.setSuffix(userAgentSuffix);
        }

        this.httpClientInterceptor = null;
        this.reactorHttpClient = httpClient();

        this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs);
        this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy);
        // Until init() builds the collection cache, the plain retry policy
        // doubles as the session-token-reset policy.
        this.resetSessionTokenRetryPolicy = retryPolicy;
        CpuMemoryMonitor.register(this);
        this.queryPlanCache = new ConcurrentHashMap<>();
        this.apiType = apiType;
        this.clientTelemetryConfig = clientTelemetryConfig;
    } catch (RuntimeException e) {
        logger.error("unexpected failure in initializing client.", e);
        // Release whatever was partially constructed before rethrowing.
        close();
        throw e;
    }
}

@Override
public DiagnosticsClientConfig getConfig() {
    return diagnosticsClientConfig;
}

@Override
public CosmosDiagnostics createDiagnostics() {
    // Every created diagnostics instance is also tracked as "most recent" so
    // cancellation paths can attach it to exceptions.
    CosmosDiagnostics diagnostics =
        diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig));
    this.mostRecentlyCreatedDiagnostics.set(diagnostics);
    return diagnostics;
}

// Push the freshly built caches/config into the gateway proxy after init().
private void updateGatewayProxy() {
    (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader);
    (this.gatewayProxy).setCollectionCache(this.collectionCache);
    (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache);
    (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations);
}

/**
 * Second-phase initialization: builds the gateway proxy, endpoint manager,
 * collection/partition-key-range caches, client telemetry, and the store
 * model (gateway or direct). Order matters: caches depend on the proxy, and
 * the effective consistency level can only be resolved after the database
 * account is known.
 */
public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) {
    try {
        this.httpClientInterceptor = httpClientInterceptor;
        if (httpClientInterceptor != null) {
            this.reactorHttpClient = httpClientInterceptor.apply(httpClient());
        }

        this.gatewayProxy = createRxGatewayProxy(this.sessionContainer,
            this.consistencyLevel,
            this.queryCompatibilityMode,
            this.userAgentContainer,
            this.globalEndpointManager,
            this.reactorHttpClient,
            this.apiType);
        this.globalEndpointManager.init();
        this.initializeGatewayConfigurationReader();

        if (metadataCachesSnapshot != null) {
            // Warm-start the collection cache from a serialized snapshot.
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy,
                metadataCachesSnapshot.getCollectionInfoByNameCache(),
                metadataCachesSnapshot.getCollectionInfoByIdCache()
            );
        } else {
            this.collectionCache = new RxClientCollectionCache(this,
                this.sessionContainer,
                this.gatewayProxy,
                this,
                this.retryPolicy);
        }
        this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy);

        this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache);
        updateGatewayProxy();
        clientTelemetry = new ClientTelemetry(
            this,
            null,
            randomUuid().toString(),
            ManagementFactory.getRuntimeMXBean().getName(),
            connectionPolicy.getConnectionMode(),
            globalEndpointManager.getLatestDatabaseAccount().getId(),
            null,
            null,
            this.configs,
            this.clientTelemetryConfig,
            this,
            this.connectionPolicy.getPreferredRegions());
        clientTelemetry.init();
        if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) {
            this.storeModel = this.gatewayProxy;
        } else {
            this.initializeDirectConnectivity();
        }
        this.retryPolicy.setRxCollectionCache(this.collectionCache);
        // Re-evaluate session capturing now that the account default
        // consistency is available.
        ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null ?
            consistencyLevel : this.getDefaultConsistencyLevelOfAccount();
        boolean updatedDisableSessionCapturing =
            (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled);
        this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing);
    } catch (Exception e) {
        logger.error("unexpected failure in initializing client.", e);
        close();
        throw e;
    }
}

// Snapshot the collection cache into a serializable form.
public void serialize(CosmosClientMetadataCachesSnapshot state) {
    RxCollectionCache.serialize(state, this.collectionCache);
}

// Build address resolver + store client factory used for direct (TCP) mode.
private void initializeDirectConnectivity() {
    this.addressResolver = new GlobalAddressResolver(this,
        this.reactorHttpClient,
        this.globalEndpointManager,
        this.configs.getProtocol(),
        this,
        this.collectionCache,
        this.partitionKeyRangeCache,
        userAgentContainer,
        null,
        this.connectionPolicy,
        this.apiType);

    this.storeClientFactory = new StoreClientFactory(
        this.addressResolver,
        this.diagnosticsClientConfig,
        this.configs,
        this.connectionPolicy,
        this.userAgentContainer,
        this.connectionSharingAcrossClientsEnabled,
        this.clientTelemetry,
        this.globalEndpointManager);

    this.createStoreModel(true);
}

// Adapter exposing this client to the GlobalEndpointManager.
DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() {
    return new DatabaseAccountManagerInternal() {

        @Override
        public URI getServiceEndpoint() {
            return RxDocumentClientImpl.this.getServiceEndpoint();
        }

        @Override
        public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
            logger.info("Getting database account endpoint from {}", endpoint);
            return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint);
        }

        @Override
        public ConnectionPolicy getConnectionPolicy() {
            return RxDocumentClientImpl.this.getConnectionPolicy();
        }
    };
}

RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer,
                                         ConsistencyLevel consistencyLevel,
                                         QueryCompatibilityMode queryCompatibilityMode,
                                         UserAgentContainer userAgentContainer,
                                         GlobalEndpointManager globalEndpointManager,
                                         HttpClient httpClient,
                                         ApiType apiType) {
    return new RxGatewayStoreModel(
        this,
sessionContainer,
        consistencyLevel,
        queryCompatibilityMode,
        userAgentContainer,
        globalEndpointManager,
        httpClient,
        apiType);
}

// Build the reactor HTTP client from the connection policy; shared across
// clients when connection sharing is enabled.
private HttpClient httpClient() {
    HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
        .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
        .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
        .withProxy(this.connectionPolicy.getProxy())
        .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

    if (connectionSharingAcrossClientsEnabled) {
        return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
    } else {
        diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
        return HttpClient.createFixed(httpClientConfig);
    }
}

// Wrap a direct-mode store client into the server store model.
// NOTE(review): the subscribeRntbdStatus flag is not used in this body.
private void createStoreModel(boolean subscribeRntbdStatus) {
    StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
        this.addressResolver,
        this.sessionContainer,
        this.gatewayConfigurationReader,
        this,
        this.useMultipleWriteLocations,
        this.sessionRetryOptions);

    this.storeModel = new ServerStoreModel(storeClient);
}

// --- simple accessors ---

@Override
public URI getServiceEndpoint() {
    return this.serviceEndpoint;
}

@Override
public ConnectionPolicy getConnectionPolicy() {
    return this.connectionPolicy;
}

@Override
public boolean isContentResponseOnWriteEnabled() {
    return contentResponseOnWriteEnabled;
}

@Override
public ConsistencyLevel getConsistencyLevel() {
    return consistencyLevel;
}

@Override
public ClientTelemetry getClientTelemetry() {
    return this.clientTelemetry;
}

@Override
public String getClientCorrelationId() {
    return this.clientCorrelationId;
}

@Override
public String getMachineId() {
    if (this.diagnosticsClientConfig == null) {
        return null;
    }
    return this.diagnosticsClientConfig.getMachineId();
}

@Override
public String getUserAgent() {
    return this.userAgentContainer.getUserAgent();
}

@Override
public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
    return mostRecentlyCreatedDiagnostics.get();
}

@Override
public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance);
}

// Serializes the database body, records serialization diagnostics, and issues
// the Create request through the retry policy.
private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (database == null) {
            throw new IllegalArgumentException("Database");
        }

        logger.debug("Creating a Database. id: [{}]", database.getId());
        validateResource(database);

        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create);
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response,
Database.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }

        logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Database, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }

        logger.debug("Reading a Database. databaseLink: [{}]", databaseLink);
        String path = Utils.joinPath(databaseLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Database, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a database. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
    return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
}

// Map a parent resource link + child resource type to the feed/query link.
private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
    switch (resourceTypeEnum) {
        case Database:
            return Paths.DATABASES_ROOT;

        case DocumentCollection:
            return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);

        case Document:
            return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);

        case Offer:
            return Paths.OFFERS_ROOT;

        case User:
            return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);

        case ClientEncryptionKey:
            return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);

        case Permission:
            return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);

        case Attachment:
            return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);

        case StoredProcedure:
            return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);

        case Trigger:
            return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);

        case UserDefinedFunction:
            return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);

        case Conflict:
            return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);

        default:
            throw new IllegalArgumentException("resource type not supported");
    }
}

private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
    if (options == null) {
        return null;
    }
    return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options);
}

private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
    if (options == null) {
        return null;
    }
    return options.getOperationContextAndListenerTuple();
}

private <T>
Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {
    // Convenience overload using this client as the diagnostics factory.
    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}

/**
 * Builds the cross-partition query pipeline: resolves the query link,
 * correlation activity id, retry policy for stale partition maps, and wraps
 * everything in a ScopedDiagnosticsFactory so diagnostics survive retries,
 * errors, and cancellation.
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);

    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();

    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ?
        correlationActivityIdOfRequestOptions : randomUuid();

    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions));

    // Retry on invalid-partition so a stale collection cache does not fail the query.
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        resourceLink,
        ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));

    final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);

    return ObservableHelper.fluxInlineIfPossibleAsObs(
        () -> createQueryInternal(
            diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout),
        invalidPartitionExceptionRetryPolicy
    ).flatMap(result -> {
        // Merge diagnostics on success, error, and cancellation alike.
        diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
        return Mono.just(result);
    })
    .onErrorMap(throwable -> {
        diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
        return throwable;
    })
    .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}

private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery,
                options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);

    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }

        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // Attach query-plan diagnostics only to the first page.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });

        RequestOptions requestOptions = options == null?
null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);

        // If an end-to-end latency policy is active, bound the page stream with a timeout.
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);

        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(
                feedResponseFlux,
                endToEndPolicyConfig,
                options,
                isQueryCancelledOnTimeout,
                diagnosticsClientContext);
        }

        return feedResponseFlux;
    }, Queues.SMALL_BUFFER_SIZE, 1);
}

/**
 * Attaches diagnostics to a query exception (typically a timeout/cancellation).
 * Prefers the most recently created diagnostics instance; otherwise folds all
 * diagnostics tracked for cancelled requests into a single aggregate by merging
 * their client-side request statistics.
 */
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {

    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

    if (mostRecentlyCreatedDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
    } else {
        List<CosmosDiagnostics> cancelledRequestDiagnostics =
            qryOptAccessor
                .getCancelledRequestDiagnosticsTracker(requestOptions);

        if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
            CosmosDiagnostics aggregatedCosmosDiagnostics =
                cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);

                        // BUGFIX: previously read the statistics from `first` again,
                        // merging each element's stats with themselves and discarding
                        // every other cancelled request's statistics.
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);

                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    .get();

            BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
        }
    }
}

private static <T>
Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout,
    DiagnosticsClientContext diagnosticsClientContext) {

    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();

    // A negative timeout is surfaced as a dedicated "negative timeout" error
    // rather than a generic operation-cancelled one.
    if (endToEndTimeout.isNegative()) {
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                    cancellationException.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnosticsForQuery(
                        requestOptions, cancellationException, diagnosticsClientContext);
                    return cancellationException;
                }
                return throwable;
            });
    }

    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (throwable instanceof TimeoutException) {
                // Translate the reactor timeout into a Cosmos cancellation with
                // merged diagnostics attached.
                CosmosException exception = new OperationCancelledException();
                exception.setStackTrace(throwable.getStackTrace());
                isQueryCancelledOnTimeout.set(true);
                applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext);
                return exception;
            }
            return throwable;
        });
}

@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    return queryDatabases(new SqlQuerySpec(query), state);
}

@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}

@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                   DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return
ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance);
}

// Serializes the collection, records serialization diagnostics, issues the
// Create request, and captures the returned session token.
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
                                                                            DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create);

        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Capture the session token so subsequent session reads see this write.
                this.sessionContainer.setSessionToken(
                    resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
                                                                             RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace);
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(
                        resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });

    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}

private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Low-level DELETE pipeline step: populate headers, note retry timing, then
// hand off to the appropriate store proxy.
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, QueryFeedOperationState state) { return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, 
QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean 
contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled;
        // Per-request option overrides the client-level content-response-on-write setting.
        if (options.isContentResponseOnWriteEnabled() != null) {
            contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
        }

        if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }

        if (options.getIfMatchETag() != null) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
        }

        if (options.getIfNoneMatchETag() != null) {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
        }

        if (options.getConsistencyLevel() != null) {
            headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
        }

        if (options.getIndexingDirective() != null) {
            headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
        }

        // Trigger lists are sent as comma-separated header values.
        if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
            String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
            headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
        }

        if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
            String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
            headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
        }

        if (!Strings.isNullOrEmpty(options.getSessionToken())) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
        }

        if (options.getResourceTokenExpirySeconds() != null) {
            headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds()));
        }

        // Manual (fixed) throughput takes precedence over the legacy offer type.
        if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
            headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
        } else if (options.getOfferType() != null) {
            headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
        }

        if (options.getOfferThroughput() == null) {
            if (options.getThroughputProperties() != null) {
                Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
                final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
                OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
                if (offerAutoscaleSettings != null) {
                    autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
                }
                // Fixed throughput and autoscale settings are mutually exclusive.
                if (offer.hasOfferThroughput() &&
                        (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                             autoscaleAutoUpgradeProperties != null &&
                                 autoscaleAutoUpgradeProperties
                                     .getAutoscaleThroughputProperties()
                                     .getIncrementPercent() >= 0)) {
                    throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with "
                                                           + "fixed offer");
                }

                if (offer.hasOfferThroughput()) {
                    headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
                } else if (offer.getOfferAutoScaleSettings() != null) {
                    headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS,
                        ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings()));
                }
            }
        }

        if (options.isQuotaInfoEnabled()) {
            headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
        }

        if (options.isScriptLoggingEnabled()) {
            headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
        }

        // Integrated cache (dedicated gateway) options.
        if (options.getDedicatedGatewayRequestOptions() != null) {
            if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
                headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                    String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
            }
            if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
                headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                    String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
            }
        }

        return headers;
    }

    public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
        return this.resetSessionTokenRetryPolicy;
    }

    // Overload that resolves the collection from the cache before stamping partition-key info.
    private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                      ByteBuffer contentAsByteBuffer,
                                                                      Document document,
                                                                      RequestOptions options) {

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return collectionObs
            .map(collectionValueHolder -> {
                addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
                return request;
            });
    }

    // Overload that reuses an already-resolved collection observable.
    private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
                                                                      ByteBuffer contentAsByteBuffer,
                                                                      Object document,
                                                                      RequestOptions options,
                                                                      Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {

        return collectionObs.map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
    }

    /**
     * Derives the effective partition-key value (explicit option, NONE, empty, or extracted
     * from the document) and stamps it on the request as the partition-key header.
     */
    private void addPartitionKeyInformation(RxDocumentServiceRequest request,
                                            ByteBuffer contentAsByteBuffer,
                                            Object objectDoc, RequestOptions options,
                                            DocumentCollection collection) {
        PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();

        PartitionKeyInternal partitionKeyInternal = null;
        if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else if (options != null && options.getPartitionKey() != null) {
            partitionKeyInternal =
BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
        } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
            // Unpartitioned (legacy) collection: use the empty partition key.
            partitionKeyInternal = PartitionKeyInternal.getEmpty();
        } else if (contentAsByteBuffer != null || objectDoc != null) {
            InternalObjectNode internalObjectNode;
            if (objectDoc instanceof InternalObjectNode) {
                internalObjectNode = (InternalObjectNode) objectDoc;
            } else if (objectDoc instanceof ObjectNode) {
                internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
            } else if (contentAsByteBuffer != null) {
                // rewind() because the buffer may already have been read during serialization.
                contentAsByteBuffer.rewind();
                internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
            } else {
                throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
            }

            // Extracting the partition key from the document is timed for diagnostics.
            Instant serializationStartTime = Instant.now();
            partitionKeyInternal =  PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
            Instant serializationEndTime = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
                new SerializationDiagnosticsContext.SerializationDiagnostics(
                    serializationStartTime,
                    serializationEndTime,
                    SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
                );
            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }

        } else {
            throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
        }

        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    }

    /**
     * Builds the service request for a document create/upsert: serializes the document,
     * records serialization diagnostics, applies headers/options, and resolves the
     * partition-key information from the target collection.
     */
    private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                    String documentCollectionLink,
                                                                    Object document,
                                                                    RequestOptions options,
                                                                    boolean disableAutomaticIdGeneration,
                                                                    OperationType operationType,
                                                                    DiagnosticsClientContext clientContextOverride) {

        if (StringUtils.isEmpty(documentCollectionLink)) {
            throw new IllegalArgumentException("documentCollectionLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Instant serializationStartTimeUTC = Instant.now();
        String trackingId = null;
        if (options != null) {
            trackingId = options.getTrackingId();
        }
        ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
        Instant serializationEndTimeUTC = Instant.now();

        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            operationType, ResourceType.Document, path, requestHeaders, options, content);

        if (operationType.isWriteOperation() &&  options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if( options != null) {
            // Hook lets the end-to-end timeout policy mark this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return addPartitionKeyInformation(request, content, document, options, collectionObs);
    }

    /**
     * Builds the service request for a transactional batch: wraps the pre-serialized batch
     * body and resolves the collection so batch headers can be stamped.
     */
    private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                   String documentCollectionLink,
                                                                   ServerBatchRequest serverBatchRequest,
                                                                   RequestOptions options,
                                                                   boolean disableAutomaticIdGeneration) {

        checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
        checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
        Instant serializationEndTimeUTC = Instant.now();

        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this,
            OperationType.Batch,
            ResourceType.Document,
            path,
            requestHeaders,
            options,
            content);

        if (options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext
serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
            addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
            return request;
        });
    }

    /**
     * Stamps batch-routing headers: either a partition-key value (single-PK batch) or a
     * partition-key-range identity (range batch), plus the common batch flags.
     */
    private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                     ServerBatchRequest serverBatchRequest,
                                                     DocumentCollection collection) {

        if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {

            PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
            PartitionKeyInternal partitionKeyInternal;

            if (partitionKey.equals(PartitionKey.NONE)) {
                PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
                partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
            } else {
                partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
            }

            request.setPartitionKeyInternal(partitionKeyInternal);
            request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
        } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
            request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
        } else {
            throw new UnsupportedOperationException("Unknown Server request.");
        }

        request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
        request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));

        request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());

        return request;
    }

    /**
     * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
     * @param request request to populate headers to
     * @param httpMethod http method
     * @return Mono, which on subscription will populate the headers in the request passed in the argument.
     */
    private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
        request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
        if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
            || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
            String resourceName = request.getResourceAddress();

            String authorization = this.getUserAuthorizationToken(
                resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
                AuthorizationTokenType.PrimaryMasterKey, request.properties);
            try {
                // The token is URL-encoded before being placed in the authorization header.
                authorization = URLEncoder.encode(authorization, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new IllegalStateException("Failed to encode authtoken.", e);
            }
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
        }

        if (this.apiType != null)   {
            request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
        }

        this.populateCapabilitiesHeader(request);

        // Default content-type / accept headers, unless the caller already set them.
        if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
        }

        if
(RequestVerb.PATCH.equals(httpMethod) &&
            !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            // PATCH requests use the JSON-patch media type.
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
        }

        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        }

        MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

        // Feed-range scoped reads/queries need extra routing headers before authorization.
        if (this.requiresFeedRangeFiltering(request)) {
            return request.getFeedRange()
                .populateFeedRangeFilteringHeaders(
                    this.getPartitionKeyRangeCache(),
                    request,
                    this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
                .flatMap(this::populateAuthorizationHeader);
        }

        return this.populateAuthorizationHeader(request);
    }

    private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
            request
                .getHeaders()
                .put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
        }
    }

    // Only document/conflict feed-reads and queries with an explicit feed range need filtering.
    private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
        if (request.getResourceType() != ResourceType.Document &&
                request.getResourceType() != ResourceType.Conflict) {
            return false;
        }

        switch (request.getOperationType()) {
            case ReadFeed:
            case Query:
            case SqlQuery:
                return request.getFeedRange() != null;
            default:
                return false;
        }
    }

    @Override
    public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
        if (request == null) {
            throw new IllegalArgumentException("request");
        }

        // AAD tokens are resolved asynchronously; other token types are handled in populateHeadersAsync.
        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return request;
                });
        } else {
            return Mono.just(request);
        }
    }

    @Override
    public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
        if (httpHeaders == null) {
            throw new IllegalArgumentException("httpHeaders");
        }

        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return httpHeaders;
                });
        }

        return Mono.just(httpHeaders);
    }

    @Override
    public AuthorizationTokenType getAuthorizationTokenType() {
        return this.authorizationTokenType;
    }

    /**
     * Resolves the authorization token for a request by precedence: custom resolver,
     * credential-based signature, single master/resource token, then the resource-token map.
     */
    @Override
    public String getUserAuthorizationToken(String resourceName,
                                            ResourceType resourceType,
                                            RequestVerb requestVerb,
                                            Map<String, String> headers,
                                            AuthorizationTokenType tokenType,
                                            Map<String, Object> properties) {

        if (this.cosmosAuthorizationTokenResolver != null) {
            return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
                properties != null ? Collections.unmodifiableMap(properties) : null);
        } else if (credential != null) {
            return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
                resourceType, headers);
        } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
            return masterKeyOrResourceToken;
        } else {
            assert resourceTokensMap != null;
            if(resourceType.equals(ResourceType.DatabaseAccount)) {
                return this.firstResourceTokenFromPermissionFeed;
            }

            return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
        }
    }

    private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
        // Unknown service types map to SYSTEM.
        CosmosResourceType cosmosResourceType =
            ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
        if (cosmosResourceType == null) {
            return CosmosResourceType.SYSTEM;
        }
        return cosmosResourceType;
    }

    void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
        this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
    }

    // Core POST (create) pipeline.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                                   DocumentClientRetryPolicy documentClientRetryPolicy,
                                                   OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);

                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }

    // Upsert is a POST with the IS_UPSERT header; it also captures the session token.
    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                                   DocumentClientRetryPolicy documentClientRetryPolicy,
                                                   OperationContextAndListenerTuple operationContextAndListenerTuple) {

        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                Map<String, String> headers = requestPopulated.getHeaders();
                // headers can never be null, since it will be initialized even when no
                // request options are specified.
                assert (headers != null);
                headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                    .map(response -> {
                            this.captureSessionToken(requestPopulated, response);
                            return response;
                        }
                    );
            });
    }

    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeadersAsync(request, RequestVerb.PUT)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeadersAsync(request, RequestVerb.PATCH)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    // Public create entry point; wraps the core with the availability (multi-region) strategy.
    @Override
    public Mono<ResourceResponse<Document>> createDocument(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration) {

        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Create,
            (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
                collectionLink,
                document,
                opt,
                disableAutomaticIdGeneration,
                e2ecfg,
                clientCtxOverride),
            options,
            options != null &&
options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    /**
     * Core create flow: builds the retry policy (adding partition-key-mismatch handling when
     * no partition key was supplied), runs the internal create, and applies the end-to-end
     * operation timeout policy.
     */
    private Mono<ResourceResponse<Document>> createDocumentCore(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();

        if (nonNullRequestOptions.getPartitionKey() == null) {
            // Without an explicit PK the collection definition may be stale; retry on PK mismatch.
            requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
        }

        DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(() ->
                createDocumentInternal(
                    collectionLink,
                    document,
                    nonNullRequestOptions,
                    disableAutomaticIdGeneration,
                    finalRetryPolicyInstance,
                    scopedDiagnosticsFactory),
                requestRetryPolicy),
            scopedDiagnosticsFactory
        );
    }

    private Mono<ResourceResponse<Document>> createDocumentInternal(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration,
        DocumentClientRetryPolicy requestRetryPolicy,
        DiagnosticsClientContext clientContextOverride) {

        try {
            logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);

            Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy,
                collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);

            return requestObs
                .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

        } catch (Exception e) {
            logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Applies the end-to-end latency policy to a point-operation Mono: a negative timeout fails
     * immediately; otherwise the Mono is bounded by the configured timeout and timeout errors
     * are converted into OperationCancelledException with diagnostics attached.
     */
    private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
        RequestOptions requestOptions,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        Mono<T> rxDocumentServiceResponseMono,
        ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

        requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);

        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {

            Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
            if (endToEndTimeout.isNegative()) {
                // Ensure at least one diagnostics instance exists before failing fast.
                CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
                if (latestCosmosDiagnosticsSnapshot == null) {
                    scopedDiagnosticsFactory.createDiagnostics();
                }
                return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
            }

            return rxDocumentServiceResponseMono
                .timeout(endToEndTimeout)
                .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                    scopedDiagnosticsFactory,
                    throwable,
                    requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
        }
        return rxDocumentServiceResponseMono;
    }

    private static Throwable getCancellationExceptionForPointOperations(
        ScopedDiagnosticsFactory scopedDiagnosticsFactory,
        Throwable throwable,
        AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {

        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        if (unwrappedException instanceof TimeoutException) {
            // Translate reactor timeout into the SDK's cancellation exception, keeping the stack.
            CosmosException exception = new OperationCancelledException();
            exception.setStackTrace(throwable.getStackTrace());

            // Let the in-flight request know it was cancelled due to the E2E timeout.
            Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
            if (actualCallback != null) {
                logger.trace("Calling actual Mark E2E timeout callback");
                actualCallback.run();
            }

            CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (lastDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }

            BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());

            return exception;
        }
        return throwable;
    }

    private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
        checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
        checkArgument(
            negativeTimeout.isNegative(),
            "This exception should only be used for negative timeouts");

        String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
        CosmosException exception = new OperationCancelledException(message, null);
        BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
        if (cosmosDiagnostics != null) {
            BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
        }

        return exception;
    }

    // Public upsert entry point; mirrors createDocument with OperationType.Upsert.
    @Override
    public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                           RequestOptions options, boolean disableAutomaticIdGeneration) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Upsert,
            (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
                collectionLink,
                document,
                opt,
                disableAutomaticIdGeneration,
                e2ecfg,
                clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    /**
     * Core upsert flow; mirrors createDocumentCore (retry-policy construction, PK-mismatch
     * fallback, and end-to-end timeout handling).
     */
    private Mono<ResourceResponse<Document>> upsertDocumentCore(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        if (nonNullRequestOptions.getPartitionKey() == null) {
            requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
                collectionCache,
                requestRetryPolicy,
                collectionLink,
                nonNullRequestOptions);
        }

        DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> upsertDocumentInternal(
                    collectionLink,
                    document,
                    nonNullRequestOptions,
                    disableAutomaticIdGeneration,
                    finalRetryPolicyInstance,
                    scopedDiagnosticsFactory),
                finalRetryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }

    private Mono<ResourceResponse<Document>> upsertDocumentInternal(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {

        try {
            logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest( retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride); return reqObs .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Replace, (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore( documentLink, document, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> replaceDocumentCore( String documentLink, Object document, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); if (nonNullRequestOptions.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( documentLink, document, nonNullRequestOptions, finalRequestRetryPolicy, endToEndPolicyConfig, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = documentFromObject(document, mapper); return this.replaceDocumentInternal( documentLink, typedDocument, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Replace, (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore( document, opt, e2ecfg, 
clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> replaceDocumentCore( Document document, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( document, options, finalRequestRetryPolicy, endToEndPolicyConfig, clientContextOverride), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal( Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal( document.getSelfLink(), document, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); if (options != null) { String trackingId = options.getTrackingId(); if (trackingId != null && !trackingId.isEmpty()) { document.set(Constants.Properties.TRACKING_ID, trackingId); } } ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); 
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs .flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) { return this.getEffectiveEndToEndOperationLatencyPolicyConfig( options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null); } private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig( CosmosEndToEndOperationLatencyPolicyConfig policyConfig) { return policyConfig != null ? policyConfig : this.cosmosEndToEndOperationLatencyPolicyConfig; } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Patch, (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore( documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> patchDocumentCore( String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> patchDocumentInternal( documentLink, cosmosPatchOperations, nonNullRequestOptions, documentClientRetryPolicy, scopedDiagnosticsFactory), documentClientRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> patchDocumentInternal( String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap( PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( clientContextOverride, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs .flatMap(req -> patch(request, 
retryPolicyInstance)) .map(resp -> toResourceResponse(resp, Document.class)); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Delete, (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore( documentLink, null, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Delete, (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore( documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> deleteDocumentCore( String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> deleteDocumentInternal( documentLink, internalObjectNode, nonNullRequestOptions, requestRetryPolicy, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> deleteDocumentInternal( String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( 
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, internalObjectNode, options, collectionObs); return requestObs .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { return readDocument(documentLink, options, this); } private Mono<ResourceResponse<Document>> readDocument( String documentLink, RequestOptions options, DiagnosticsClientContext innerDiagnosticsFactory) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Read, (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride), options, false, innerDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> readDocumentCore( String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> readDocumentInternal( documentLink, nonNullRequestOptions, retryPolicyInstance, scopedDiagnosticsFactory), retryPolicyInstance), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> readDocumentInternal( String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Read, ResourceType.Document, path, requestHeaders, options); options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this.read(request, 
retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public <T> Flux<FeedResponse<T>> readDocuments( String collectionLink, QueryFeedOperationState state, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, QueryFeedOperationState state, Class<T> klass) { final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true); state.registerDiagnosticsFactory( () -> {}, (ctx) -> diagnosticsFactory.merge(ctx) ); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono .flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = 
collectionRoutingMapValueHolder.v; if (routingMap == null) { return Mono.error(new IllegalStateException("Failed to get routing map.")); } itemIdentityList .forEach(itemIdentity -> { if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) && ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey()) .getComponents().size() != pkDefinition.getPaths().size()) { throw new IllegalArgumentException(RMResources.PartitionKeyMismatch); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany( diagnosticsFactory, partitionRangeItemKeyMap, resourceLink, state.getQueryOptions(), klass); Flux<FeedResponse<Document>> queries = queryForReadMany( diagnosticsFactory, resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), state.getQueryOptions(), Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)); return Flux.merge(pointReads, queries) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection(); double requestCharge = 0; for 
(FeedResponse<Document> page : feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics())); } CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics); diagnosticsAccessor.addClientSideDiagnosticsToFeed( aggregatedDiagnostics, aggregateRequestStatistics); state.mergeDiagnosticsContext(); CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); if (ctx != null) { ctxAccessor.recordOperation( ctx, 200, 0, finalList.size(), requestCharge, aggregatedDiagnostics, null ); diagnosticsAccessor .setDiagnosticsContext( aggregatedDiagnostics, ctx); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponseWithQueryMetrics( finalList, headers, aggregatedQueryMetrics, null, false, false, aggregatedDiagnostics); return frp; }); }) .onErrorMap(throwable -> { if (throwable instanceof CosmosException) { CosmosException cosmosException = (CosmosException)throwable; CosmosDiagnostics diagnostics = cosmosException.getDiagnostics(); if (diagnostics != null) { state.mergeDiagnosticsContext(); CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); if (ctx != null) { ctxAccessor.recordOperation( ctx, cosmosException.getStatusCode(), cosmosException.getSubStatusCode(), 0, cosmosException.getRequestCharge(), diagnostics, throwable ); diagnosticsAccessor .setDiagnosticsContext( diagnostics, state.getDiagnosticsContextSnapshot()); } } return cosmosException; } return throwable; 
}); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); String partitionKeySelector = createPkSelector(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue(); if (cosmosItemIdentityList.size() > 1) { if (partitionKeySelector.equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(cosmosItemIdentityList, partitionKeySelector); } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) { sqlQuerySpec = createReadManyQuerySpecMultiHash(entry.getValue(), partitionKeyDefinition); } else { sqlQuerySpec = createReadManyQuerySpec(cosmosItemIdentityList, partitionKeySelector); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } } return rangeQueryMap; } private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame( List<CosmosItemIdentity> idPartitionKeyPairList, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) { StringBuilder 
queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkParamName = "@param" + (2 * i); parameters.add(new SqlParameter(pkParamName, pkValue)); String idValue = itemIdentity.getId(); String idParamName = "@param" + (2 * i + 1); parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); queryStringBuilder.append(" AND "); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpecMultiHash( List<CosmosItemIdentity> itemIdentities, PartitionKeyDefinition partitionKeyDefinition) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); int paramCount = 0; for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey(); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey); String pkValueString = (String) pkValue; List<List<String>> partitionKeyParams = new ArrayList<>(); List<String> paths = partitionKeyDefinition.getPaths(); int pathCount = 0; for (String subPartitionKey: pkValueString.split("=")) { String pkParamName = 
"@param" + paramCount; partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName)); parameters.add(new SqlParameter(pkParamName, subPartitionKey)); paramCount++; pathCount++; } String idValue = itemIdentity.getId(); String idParamName = "@param" + paramCount; paramCount++; parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append("("); queryStringBuilder.append("c.id = "); queryStringBuilder.append(idParamName); for (List<String> pkParam: partitionKeyParams) { queryStringBuilder.append(" AND "); queryStringBuilder.append(" c."); queryStringBuilder.append(pkParam.get(0).substring(1)); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParam.get(1)); } queryStringBuilder.append(" )"); if (i < itemIdentities.size() - 1) { queryStringBuilder.append(" OR "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) { return partitionKeyDefinition.getPaths() .stream() .map(pathPart -> StringUtils.substring(pathPart, 1)) .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) .map(part -> "[\"" + part + "\"]") .collect(Collectors.joining()); } private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany( ScopedDiagnosticsFactory diagnosticsFactory, String parentResourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, DocumentCollection collection, Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) { if (rangeQueryMap.isEmpty()) { return Flux.empty(); } UUID activityId = randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); Flux<? 
extends IDocumentQueryExecutionContext<T>> executionContext =
    DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
        diagnosticsFactory,
        queryClient,
        collection.getResourceId(),
        sqlQuery,
        rangeQueryMap,
        options,
        collection.getResourceId(),
        parentResourceLink,
        activityId,
        klass,
        resourceTypeEnum,
        isQueryCancelledOnTimeout);

    Flux<FeedResponse<T>> feedResponseFlux =
        executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

    RequestOptions requestOptions = options == null? null :
        ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(requestOptions);
    // Only wrap with the client-side timeout when an end-to-end latency policy is
    // configured and enabled; otherwise return the raw flux.
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        return getFeedResponseFluxWithTimeout(
            feedResponseFlux,
            endToEndPolicyConfig,
            options,
            isQueryCancelledOnTimeout,
            diagnosticsFactory);
    }

    return feedResponseFlux;
}

/**
 * For partition ranges that contain exactly one requested item, issue a point read
 * instead of a query; each read result (or plain 404) is converted into a
 * single-item (or empty) FeedResponse downstream.
 */
private <T> Flux<FeedResponse<Document>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {

    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        Throwable
unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            // A plain 404 (sub-status UNKNOWN) just means the item does
                            // not exist: carry the exception through the pair so it
                            // becomes an empty page, not an error.
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        return Mono.error(unwrappedThrowable);
                    });
            }
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<Document> feedResponse;
            if (cosmosException != null) {
                // Not-found point read: emit an empty page but keep the diagnostics.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                CosmosItemResponse<T> cosmosItemResponse =
                    ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer());
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())),
                    cosmosItemResponse.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }
            return Mono.just(feedResponse);
        });
}

/** Convenience overload: wraps the raw query string into a SqlQuerySpec. */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}

// Adapter exposing this client's caches and retry/consistency policies to the
// query execution pipeline (continues on the next chunk).
private IDocumentQueryClient
documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {

    return new IDocumentQueryClient () {

        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }

        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }

        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }

        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // Account-level default as reported by the gateway.
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }

        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // Consistency configured on this client instance.
            return RxDocumentClientImpl.this.consistencyLevel;
        }

        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                // Propagate the correlated activity id and notify the operation
                // listener around the request / response / error.
                final OperationListener listener =
                    operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);

                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }

        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }

        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {

            // Delegates straight to the outer client.
            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }

        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // NOTE(review): unimplemented — returns null rather than an empty/error
            // Mono; presumably never invoked by the query pipeline. TODO confirm.
            return null;
        }
    };
}

@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state, Class<T> classOfT) {
    // Logs the query text (subject to the logger's redaction rules) before execution.
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}

@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {

    checkNotNull(collection, "Argument 'collection' must not be null.");

    ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);

    return changeFeedQueryImpl.executeAsync();
}

@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}

/**
 * Reads all documents of a single logical partition by issuing a partition-scoped
 * scan query (continues on the next chunk).
 *
 * @throws IllegalArgumentException when collectionLink is empty or partitionKey is null.
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }

    // Clone so per-call mutations (e.g. partition-key-range id) don't leak back
    // into the caller-owned state.
    final CosmosQueryRequestOptions effectiveOptions =
        qryOptAccessor.clone(state.getQueryOptions());

    RequestOptions nonNullRequestOptions =
        qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();

    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);

    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);
    // With fewer than two applicable regions there is no speculative retry, so the
    // diagnostics factory never needs resetting between attempts.
    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx));
    } else {
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
    }

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );

    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();

    return collectionObs.flatMap(documentCollectionResourceResponse -> {

        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }

        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        String pkSelector = createPkSelector(pkDefinition);
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector);

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();

        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));

        // Handles the case where the collection was recreated under the same name
        // and the cached partition metadata is stale.
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();
                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Resolve the single physical partition owning this logical partition.
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);

                    PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                    return createQueryInternal(
                        diagnosticsFactory,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }

        // Speculative multi-region execution: fold the scoped diagnostics back into
        // the request options on every terminal signal (next, error, cancellation).
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}

@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}

/** Reads the partition-key-range feed of a collection. */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<PartitionKeyRange>>
readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}

/**
 * Builds the service request for stored-procedure operations under a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or storedProcedure is null.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }

    validateResource(storedProcedure);

    String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
}

/**
 * Builds the service request for user-defined-function operations under a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or udf is null.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }

    validateResource(udf);

    String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
}

/** Creates a stored procedure, retried under the session-token-reset policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy),
        requestRetryPolicy);
}

// Builds and sends the Create request; synchronous failures (validation, request
// building) are surfaced as an error Mono rather than thrown.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Replaces a stored procedure (addressed by its self link), with retry. */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy),
        requestRetryPolicy);
}

private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

        RxDocumentClientImpl.validateResource(storedProcedure);

        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes a stored procedure by link, with retry. */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy),
        requestRetryPolicy);
}

private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads a stored procedure by link, with retry. */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the stored-procedure feed of a collection. */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class,
        Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 QueryFeedOperationState state) {
    return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
}

@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}

/** Executes a stored procedure with the given parameters, with retry. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams,
documentClientRetryPolicy),
        documentClientRetryPolicy);
}

/** Executes a transactional batch against a collection, with retry. */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration),
        documentClientRetryPolicy);
}

// Builds and sends the ExecuteJavaScript request; the body is the JSON-serialized
// parameter list (empty string when there are no parameters).
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                     RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {

    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);

        String path = Utils.joinPath(storedProcedureLink, null);

        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure,
            OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ?
RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);

        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda sends the outer 'request' rather than 'req'.
        // addPartitionKeyInformation appears to mutate and return the same request
        // object, in which case this is equivalent — verify before changing.
        return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));

    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Builds the batch request, sends it, and parses the service response into a
// CosmosBatchResponse; synchronous failures surface as an error Mono.
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {

    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());

        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));

        return responseObservable
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));

    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}

/** Creates a trigger under a collection, with retry. */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createTriggerInternal(collectionLink,
trigger, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]",
            collectionLink, trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Builds the service request for trigger operations under a collection.
 *
 * @throws IllegalArgumentException when collectionLink is empty or trigger is null.
 */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger,
                                                   RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }

    RxDocumentClientImpl.validateResource(trigger);

    String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
        trigger, requestHeaders, options);
}

/** Replaces a trigger (addressed by its self link), with retry. */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy
retryPolicyInstance) {

    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }

        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);

        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes a trigger by link, with retry. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }

        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads a trigger by link, with retry. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }

        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the trigger feed of a collection. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class,
        Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 QueryFeedOperationState state) {
    return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}

@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}

/** Creates a user-defined function under a collection, with retry. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf,
                                                                             RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance),
        retryPolicyInstance);
}

private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction
// (continuation of createUserDefinedFunctionInternal's parameter list from the previous source line)
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Replace UDF: one retry-policy instance is shared by the internal call and the retry driver.
@Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Delete UDF by link; empty link fails synchronously and is surfaced via Mono.error.
@Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Read UDF by link.
@Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Feed / query over UDFs, plus the Read-Conflict entry point; the trailing 'private' opens
// readConflictInternal, whose declaration continues on the next source line.
@Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions( String collectionLink, String query, QueryFeedOperationState state) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions( String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private
// Internal Read-Conflict: resolves partition-key information first, then issues the read under the
// shared retry policy.
// NOTE(review): inside the flatMap the captured 'request' is used rather than the emitted 'req';
// this is only equivalent if addPartitionKeyInformation mutates and returns the same request
// instance - confirm against that helper.
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Feed / query over the Conflicts of a collection.
@Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, QueryFeedOperationState state) { return queryConflicts(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict); }
// Delete Conflict: entry point + internal (same captured-'request' caveat as readConflictInternal above).
@Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Create User: entry point + internal; the debug-log string literal continues on the next source line.
@Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Upsert User: entry point + internal.
@Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Validates arguments and builds the User request shared by create/upsert.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); }
// Replace User: entry point + internal.
@Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. 
user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Delete User. NOTE(review): unlike the sibling operations this method lacks @Override - presumably
// still an interface method; flagging only, since a doc-only pass must not change tokens.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Read User by link: entry point + internal.
@Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Feed / query over Users of a database, then the Read-ClientEncryptionKey entry point; the internal
// method's body is cut after 'if' and continues on the next source line.
@Override public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) { return queryUsers(databaseLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if
// (continuation of readClientEncryptionKeyInternal: the 'if' keyword sits at the end of the previous line)
(StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Create ClientEncryptionKey: entry point + internal (internal takes the policy as 'documentClientRetryPolicy').
@Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. 
databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Validates arguments and builds the ClientEncryptionKey request shared by the CEK operations.
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); }
// Replace ClientEncryptionKey (replace is addressed by name-based link, not self-link).
@Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy
retryPolicyInstance) { try { if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Feed / query over ClientEncryptionKeys, then the Create-Permission entry point.
// NOTE(review): the second argument createPermission hands to inlineIfPossibleAsObs (on the next
// source line) is a *different, freshly created* retry policy than the documentClientRetryPolicy
// captured by the lambda - inconsistent with every sibling method in this file; verify intent.
@Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys( String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys( String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() ->
createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null)); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Validates arguments and builds the Permission request shared by create/upsert.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); }
// Replace Permission (addressed by the resource's self-link): entry point + internal.
@Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Delete Permission by link: entry point + internal.
@Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Read Permission by link: entry point + internal.
@Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Feed / query over Permissions of a user, then Replace-Offer (no RequestOptions on offer calls);
// the trailing debug-log string literal continues on the next source line.
@Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, QueryFeedOperationState state) { return queryPermissions(userLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing 
an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Read Offer by link (offers carry no RequestOptions; headers are passed as null).
@Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Read all Offers as a feed (delegates to the generic non-document read-feed helper below).
@Override public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); }
// Generic paged ReadFeed for non-document resources: builds continuation-token/page-size headers
// per page and hands the request factory plus executor to the Paginator. Asserts the resource type
// is not Document (documents go through the query pipeline instead). maxPageSize -1 means
// "no explicit page size requested".
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( QueryFeedOperationState state, ResourceType resourceType, Class<T> klass, String resourceLink) { return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy), retryPolicy); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink, DocumentClientRetryPolicy retryPolicy) { final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions(); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions); int maxPageSize = maxItemCount != null ?
maxItemCount : -1; assert(resourceType != ResourceType.Document); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> readFeed(request) .map(response -> toFeedResponsePage( response, ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .getItemFactoryMethod(nonNullOptions, klass), klass)); return Paginator .getPaginatedQueryResultAsObservable( nonNullOptions, createRequestFunc, executeFunc, maxPageSize); }
// Offer queries, then the GetDatabaseAccount entry point; getDatabaseAccountInternal's request
// construction is cut mid-argument-list and continues past this chunk.
@Override public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) { return queryOffers(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, 
ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Object getSession() { return this.sessionContainer; } public void setSession(Object sessionContainer) { this.sessionContainer = (SessionContainer) sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } @Override public GlobalEndpointManager getGlobalEndpointManager() { return this.globalEndpointManager; } @Override public AddressSelector getAddressSelector() { return new AddressSelector(this.addressResolver, this.configs.getProtocol()); } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. 
* * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.useGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", 
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public ItemDeserializer getItemDeserializer() { return this.itemDeserializer; } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) { this.storeModel.enableThroughputControl(throughputControlStore); } else { this.gatewayProxy.enableThroughputControl(throughputControlStore); } } this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono); } @Override public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) { return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig); } @Override public ConsistencyLevel getDefaultConsistencyLevelOfAccount() { return this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } /*** * Configure fault injector provider. 
* * @param injectorProvider the fault injector provider. */ @Override public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) { checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null"); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) { this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs); this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs); } this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs); } @Override public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities); } @Override public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities); } @Override public String getMasterKeyOrResourceToken() { return this.masterKeyOrResourceToken; } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, String partitionKeySelector) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey); String pkParamName = "@pkValue"; parameters.add(new SqlParameter(pkParamName, pkValue)); queryStringBuilder.append(" c"); queryStringBuilder.append(partitionKeySelector); queryStringBuilder.append((" = ")); queryStringBuilder.append(pkParamName); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, 
null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink, forceRefresh), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal( RxDocumentServiceRequest request, String collectionLink, boolean forceRefresh) { logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, forceRefresh, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); 
return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } /** * Creates a type 4 (pseudo randomly generated) UUID. * <p> * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator. * * @return A randomly generated {@link UUID}. */ public static UUID randomUuid() { return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); } static UUID randomUuid(long msb, long lsb) { msb &= 0xffffffffffff0fffL; msb |= 0x0000000000004000L; lsb &= 0x3fffffffffffffffL; lsb |= 0x8000000000000000L; return new UUID(msb, lsb); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled) { return wrapPointOperationWithAvailabilityStrategy( resourceType, operationType, callback, initialRequestOptions, idempotentWriteRetriesEnabled, this ); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled, DiagnosticsClientContext innerDiagnosticsFactory) { checkNotNull(resourceType, "Argument 'resourceType' must not be null."); checkNotNull(operationType, "Argument 'operationType' must not be null."); checkNotNull(callback, "Argument 'callback' must not be null."); final RequestOptions nonNullRequestOptions = initialRequestOptions != null ? 
            initialRequestOptions : new RequestOptions();

        checkArgument(
            resourceType == ResourceType.Document,
            "This method can only be used for document point operations.");

        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);

        List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            resourceType,
            operationType,
            idempotentWriteRetriesEnabled,
            nonNullRequestOptions);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            // Hedging disabled or only one applicable region - run the operation directly.
            return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
        }

        ThresholdBasedAvailabilityStrategy availabilityStrategy =
            (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
        List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
        // One shared scoped factory so diagnostics of all hedged attempts can be merged later.
        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);

        orderedApplicableRegionsForSpeculation
            .forEach(region -> {
                RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
                if (monoList.isEmpty()) {
                    // First attempt runs unrestricted (may retry across all regions); any
                    // CosmosException ends the race as a non-transient result.
                    Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                        callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                                .map(NonTransientPointOperationResult::new)
                                .onErrorResume(
                                    RxDocumentClientImpl::isCosmosException,
                                    t -> Mono.just(
                                        new NonTransientPointOperationResult(
                                            Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    if (logger.isDebugEnabled()) {
                        monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                            "STARTING to process {} operation in region '{}'", operationType, region)));
                    } else {
                        monoList.add(initialMonoAcrossAllRegions);
                    }
                } else {
                    // Hedged attempts are pinned to one region by excluding all other applicable
                    // regions; only non-transient errors end the race for these attempts.
                    clonedOptions.setExcludeRegions(
                        getEffectiveExcludedRegionsForHedging(
                            nonNullRequestOptions.getExcludeRegions(),
                            orderedApplicableRegionsForSpeculation,
                            region)
                    );

                    Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                        callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                                .map(NonTransientPointOperationResult::new)
                                .onErrorResume(
                                    RxDocumentClientImpl::isNonTransientCosmosException,
                                    t -> Mono.just(
                                        new NonTransientPointOperationResult(
                                            Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    // Each additional region starts after threshold + (n-1) * thresholdStep.
                    Duration delayForCrossRegionalRetry = (availabilityStrategy)
                        .getThreshold()
                        .plus((availabilityStrategy)
                            .getThresholdStep()
                            .multipliedBy(monoList.size() - 1));

                    if (logger.isDebugEnabled()) {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                                .delaySubscription(delayForCrossRegionalRetry));
                    } else {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .delaySubscription(delayForCrossRegionalRetry));
                    }
                }
            });

        // The first mono emitting a value (success or non-transient error) wins the race.
        return Mono
            .firstWithValue(monoList)
            .flatMap(nonTransientResult -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                if (nonTransientResult.isError()) {
                    return Mono.error(nonTransientResult.exception);
                }

                return Mono.just(nonTransientResult.response);
            })
            .onErrorMap(throwable -> {
                // NoSuchElementException means every mono failed or was cancelled; surface the
                // first inner CosmosException if any.
                Throwable exception = Exceptions.unwrap(throwable);

                if (exception instanceof NoSuchElementException) {
                    List<Throwable> innerThrowables = Exceptions
                        .unwrapMultiple(exception.getCause());

                    int index = 0;
                    for (Throwable innerThrowable : innerThrowables) {
                        Throwable innerException = Exceptions.unwrap(innerThrowable);

                        if (innerException instanceof CosmosException) {
                            CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                            diagnosticsFactory.merge(nonNullRequestOptions);
                            return cosmosException;
                        } else if (innerException instanceof NoSuchElementException) {
                            logger.trace(
                                "Operation in {} completed with empty result because it was cancelled.",
                                orderedApplicableRegionsForSpeculation.get(index));
                        } else if (logger.isWarnEnabled()) {
                            String message =
                                "Unexpected Non-CosmosException when processing operation in '"
                                    + orderedApplicableRegionsForSpeculation.get(index)
                                    + "'.";
                            logger.warn(
                                message,
                                innerException
                            );
                        }

                        index++;
                    }
                }

                diagnosticsFactory.merge(nonNullRequestOptions);
                return exception;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    }

    private static boolean isCosmosException(Throwable t) {
        final Throwable unwrappedException = Exceptions.unwrap(t);
        return unwrappedException instanceof CosmosException;
    }

    private static boolean isNonTransientCosmosException(Throwable t) {
        final Throwable unwrappedException = Exceptions.unwrap(t);
        if (!(unwrappedException instanceof CosmosException)) {
            return false;
        }
        CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }

    // For a hedged attempt pinned to currentRegion: exclude every other applicable region
    // on top of the caller-provided exclusions.
    private List<String> getEffectiveExcludedRegionsForHedging(
        List<String> initialExcludedRegions,
        List<String> applicableRegions,
        String currentRegion) {

        List<String> effectiveExcludedRegions = new ArrayList<>();
        if (initialExcludedRegions != null) {
            effectiveExcludedRegions.addAll(initialExcludedRegions);
        }

        for (String applicableRegion: applicableRegions) {
            if (!applicableRegion.equals(currentRegion)) {
                effectiveExcludedRegions.add(applicableRegion);
            }
        }

        return effectiveExcludedRegions;
    }

    // Status/sub-status combinations that should immediately end the hedging race.
    private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
        // All status codes below 400 are final results.
        if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
            return true;
        }

        // Client-side operation timeout (cancellation) is treated as final.
        if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT &&
            subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {
            return true;
        }

        // These status codes indicate non-transient errors.
        if (statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) {
            return true;
        }

        // 404 with unknown sub-status is also a final result.
        if (statusCode == HttpConstants.StatusCodes.NOTFOUND
            && subStatusCode ==
HttpConstants.SubStatusCodes.UNKNOWN) { return true; } return false; } private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) { if (clientContextOverride != null) { return clientContextOverride; } return this; } /** * Returns the applicable endpoints ordered by preference list if any * @param operationType - the operationT * @return the applicable endpoints ordered by preference list if any */ private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) { if (operationType.isReadOnlyOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions)); } else if (operationType.isWriteOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions)); } return EMPTY_ENDPOINT_LIST; } private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) { if (orderedEffectiveEndpointsList == null) { return EMPTY_ENDPOINT_LIST; } int i = 0; while (i < orderedEffectiveEndpointsList.size()) { if (orderedEffectiveEndpointsList.get(i) == null) { orderedEffectiveEndpointsList.remove(i); } else { i++; } } return orderedEffectiveEndpointsList; } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, RequestOptions options) { return getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, isIdempotentWriteRetriesEnabled, options.getExcludeRegions()); } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, List<String> excludedRegions) { if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) { return EMPTY_REGION_LIST; } if 
(resourceType != ResourceType.Document) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) { return EMPTY_REGION_LIST; } if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) { return EMPTY_REGION_LIST; } List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions); HashSet<String> normalizedExcludedRegions = new HashSet<>(); if (excludedRegions != null) { excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT))); } List<String> orderedRegionsForSpeculation = new ArrayList<>(); endpoints.forEach(uri -> { String regionName = this.globalEndpointManager.getRegionName(uri, operationType); if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) { orderedRegionsForSpeculation.add(regionName); } }); return orderedRegionsForSpeculation; } private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( final ResourceType resourceType, final OperationType operationType, final Supplier<DocumentClientRetryPolicy> retryPolicyFactory, final RxDocumentServiceRequest req, final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation ) { checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null."); checkNotNull(req, "Argument 'req' must not be null."); assert(resourceType == ResourceType.Document); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = this.getEffectiveEndToEndOperationLatencyPolicyConfig( req.requestContext.getEndToEndOperationLatencyPolicyConfig()); List<String> initialExcludedRegions = req.requestContext.getExcludeRegions(); List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, false, 
            initialExcludedRegions
        );

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            // At most one applicable region - no hedging possible.
            return feedOperation.apply(retryPolicyFactory, req);
        }

        ThresholdBasedAvailabilityStrategy availabilityStrategy =
            (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
        List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();

        orderedApplicableRegionsForSpeculation
            .forEach(region -> {
                RxDocumentServiceRequest clonedRequest = req.clone();

                if (monoList.isEmpty()) {
                    // First attempt may use all regions; any CosmosException is terminal for the race.
                    Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
                        feedOperation.apply(retryPolicyFactory, clonedRequest)
                                     .map(NonTransientFeedOperationResult::new)
                                     .onErrorResume(
                                         RxDocumentClientImpl::isCosmosException,
                                         t -> Mono.just(
                                             new NonTransientFeedOperationResult<>(
                                                 Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    if (logger.isDebugEnabled()) {
                        monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                            "STARTING to process {} operation in region '{}'", operationType, region)));
                    } else {
                        monoList.add(initialMonoAcrossAllRegions);
                    }
                } else {
                    // Hedged attempt pinned to one region; only non-transient failures end the race.
                    clonedRequest.requestContext.setExcludeRegions(
                        getEffectiveExcludedRegionsForHedging(
                            initialExcludedRegions,
                            orderedApplicableRegionsForSpeculation,
                            region)
                    );

                    Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
                        feedOperation.apply(retryPolicyFactory, clonedRequest)
                                     .map(NonTransientFeedOperationResult::new)
                                     .onErrorResume(
                                         RxDocumentClientImpl::isNonTransientCosmosException,
                                         t -> Mono.just(
                                             new NonTransientFeedOperationResult<>(
                                                 Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    // Stagger each extra region by threshold + (n-1) * thresholdStep.
                    Duration delayForCrossRegionalRetry = (availabilityStrategy)
                        .getThreshold()
                        .plus((availabilityStrategy)
                            .getThresholdStep()
                            .multipliedBy(monoList.size() - 1));

                    if (logger.isDebugEnabled()) {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                                .delaySubscription(delayForCrossRegionalRetry));
                    } else {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .delaySubscription(delayForCrossRegionalRetry));
                    }
                }
            });

        return Mono
            .firstWithValue(monoList)
            .flatMap(nonTransientResult -> {

                if (nonTransientResult.isError()) {
                    return Mono.error(nonTransientResult.exception);
                }

                return Mono.just(nonTransientResult.response);
            })
            .onErrorMap(throwable -> {
                // NoSuchElementException means all hedged attempts were cancelled or failed;
                // surface the first inner CosmosException if present.
                Throwable exception = Exceptions.unwrap(throwable);

                if (exception instanceof NoSuchElementException) {
                    List<Throwable> innerThrowables = Exceptions
                        .unwrapMultiple(exception.getCause());

                    int index = 0;
                    for (Throwable innerThrowable : innerThrowables) {
                        Throwable innerException = Exceptions.unwrap(innerThrowable);

                        if (innerException instanceof CosmosException) {
                            return Utils.as(innerException, CosmosException.class);
                        } else if (innerException instanceof NoSuchElementException) {
                            logger.trace(
                                "Operation in {} completed with empty result because it was cancelled.",
                                orderedApplicableRegionsForSpeculation.get(index));
                        } else if (logger.isWarnEnabled()) {
                            String message =
                                "Unexpected Non-CosmosException when processing operation in '"
                                    + orderedApplicableRegionsForSpeculation.get(index)
                                    + "'.";
                            logger.warn(
                                message,
                                innerException
                            );
                        }

                        index++;
                    }
                }

                return exception;
            });
    }

    // Callback shape for document point operations executed with (possibly hedged) request options.
    @FunctionalInterface
    private interface DocumentPointOperation {
        Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
    }

    // Either a successful response or a non-transient CosmosException; exactly one is non-null.
    private static class NonTransientPointOperationResult {
        private final ResourceResponse<Document> response;
        private final CosmosException exception;

        public NonTransientPointOperationResult(CosmosException exception) {
            checkNotNull(exception, "Argument 'exception' must not be null.");
            this.exception = exception;
            this.response = null;
        }

        public NonTransientPointOperationResult(ResourceResponse<Document> response) {
            checkNotNull(response, "Argument 'response' must not be null.");
            this.exception = null;
            this.response =
            response;
        }

        public boolean isError() {
            return this.exception != null;
        }

        public CosmosException getException() {
            return this.exception;
        }

        public ResourceResponse<Document> getResponse() {
            return this.response;
        }
    }

    // Either a successful feed result or a non-transient CosmosException; exactly one is non-null.
    private static class NonTransientFeedOperationResult<T> {
        private final T response;
        private final CosmosException exception;

        public NonTransientFeedOperationResult(CosmosException exception) {
            checkNotNull(exception, "Argument 'exception' must not be null.");
            this.exception = exception;
            this.response = null;
        }

        public NonTransientFeedOperationResult(T response) {
            checkNotNull(response, "Argument 'response' must not be null.");
            this.exception = null;
            this.response = response;
        }

        public boolean isError() {
            return this.exception != null;
        }

        public CosmosException getException() {
            return this.exception;
        }

        public T getResponse() {
            return this.response;
        }
    }

    /**
     * Diagnostics factory that records every {@link CosmosDiagnostics} it creates so that, after
     * a (possibly hedged) operation finishes, all of them can be merged into one diagnostics context.
     */
    private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {

        private final AtomicBoolean isMerged = new AtomicBoolean(false);
        private final DiagnosticsClientContext inner;
        // All diagnostics instances handed out by this factory, in creation order.
        private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
        private final boolean shouldCaptureAllFeedDiagnostics;
        private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);

        public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
            checkNotNull(inner, "Argument 'inner' must not be null.");
            this.inner = inner;
            this.createdDiagnostics = new ConcurrentLinkedQueue<>();
            this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
        }

        @Override
        public DiagnosticsClientConfig getConfig() {
            return inner.getConfig();
        }

        @Override
        public CosmosDiagnostics createDiagnostics() {
            CosmosDiagnostics diagnostics = inner.createDiagnostics();
            createdDiagnostics.add(diagnostics);
            mostRecentlyCreatedDiagnostics.set(diagnostics);
            return diagnostics;
        }

        @Override
        public String getUserAgent() {
            return inner.getUserAgent();
        }

        @Override
        public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
            return this.mostRecentlyCreatedDiagnostics.get();
        }

        public void merge(RequestOptions requestOptions) {
            CosmosDiagnosticsContext knownCtx = null;
            if (requestOptions != null) {
                CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
                if (ctxSnapshot != null) {
                    knownCtx = requestOptions.getDiagnosticsContextSnapshot();
                }
            }

            merge(knownCtx);
        }

        // Merge is one-shot: only the first caller folds the collected diagnostics into a context.
        public void merge(CosmosDiagnosticsContext knownCtx) {
            if (!isMerged.compareAndSet(false, true)) {
                return;
            }

            CosmosDiagnosticsContext ctx = null;

            if (knownCtx != null) {
                ctx = knownCtx;
            } else {
                // Fall back to the first created diagnostics that already has a context.
                for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                    if (diagnostics.getDiagnosticsContext() != null) {
                        ctx = diagnostics.getDiagnosticsContext();
                        break;
                    }
                }
            }

            if (ctx == null) {
                return;
            }

            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                    if (this.shouldCaptureAllFeedDiagnostics
                        && diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {

                        // Mark feed diagnostics as captured so paged-flux plumbing does not drop them.
                        AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                        if (isCaptured != null) {
                            isCaptured.set(true);
                        }
                    }
                    ctxAccessor.addDiagnostics(ctx, diagnostics);
                }
            }
        }

        public void reset() {
            this.createdDiagnostics.clear();
            this.isMerged.set(false);
        }
    }
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private final static List<String> EMPTY_REGION_LIST = Collections.emptyList(); private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor(); private final static ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final static ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor = ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor(); private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>(); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final ItemDeserializer 
itemDeserializer = new ItemDeserializer.JsonDeserializer(); private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private SessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxGatewayStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private final ApiType apiType; private final CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. 
Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final String clientCorrelationId; private final SessionRetryOptions sessionRetryOptions; private final boolean sessionCapturingOverrideEnabled; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, 
cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String 
clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { this( serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length == 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs = null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? 
BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig) { assert(clientTelemetryConfig != null); Boolean clientTelemetryEnabled = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor() .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); assert(clientTelemetryEnabled != null); activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.incrementAndGet(); this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ? 
String.format("%05d",this.clientId): clientCorrelationId; clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withClientMap(clientMap); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig; this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig); this.sessionRetryOptions = sessionRetryOptions; logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}], directModeProtocol [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential = tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = 
AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); this.diagnosticsClientConfig.withMachineId(tempMachineId); this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig); this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions); this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer = new 
SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = new ConcurrentHashMap<>(); this.apiType = apiType; this.clientTelemetryConfig = clientTelemetryConfig; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = diagnosticsAccessor.create(this, telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig)); this.mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } private void updateGatewayProxy() { (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); (this.gatewayProxy).setCollectionCache(this.collectionCache); (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = 
createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); this.initializeGatewayConfigurationReader(); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); clientTelemetry = new ClientTelemetry( this, null, randomUuid().toString(), ManagementFactory.getRuntimeMXBean().getName(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.configs, this.clientTelemetryConfig, this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } this.retryPolicy.setRxCollectionCache(this.collectionCache); ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null ? 
consistencyLevel : this.getDefaultConsistencyLevelOfAccount(); boolean updatedDisableSessionCapturing = (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry, this.globalEndpointManager); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, 
sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString()); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations, this.sessionRetryOptions); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public String getClientCorrelationId() { return this.clientCorrelationId; } @Override public String getMachineId() { if (this.diagnosticsClientConfig == null) { return null; } return this.diagnosticsClientConfig.getMachineId(); } @Override public String getUserAgent() { return this.userAgentContainer.getUserAgent(); } @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return 
mostRecentlyCreatedDiagnostics.get(); } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(database); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> toResourceResponse(response, 
Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor().getOperationContext(options); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T> 
// NOTE(review): the generic/visibility declaration of this overload starts before this
// chunk; the visible part of the signature is reproduced unchanged.
Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum) {

    // Delegate to the private overload, using this client itself as the diagnostics factory.
    return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this);
}

/**
 * Builds the query pipeline for the given resource link and wires up scoped diagnostics and
 * the invalid-partition retry policy.
 *
 * @param parentResourceLink link of the parent resource the query targets
 * @param sqlQuery the query text/parameters
 * @param state per-operation state carrying query options and diagnostics snapshots
 * @param klass item deserialization target type
 * @param resourceTypeEnum resource type being queried
 * @param innerDiagnosticsFactory diagnostics factory to scope for this operation
 * @return a Flux of feed responses produced by the query execution context
 */
private <T> Flux<FeedResponse<T>> createQuery(
    String parentResourceLink,
    SqlQuerySpec sqlQuery,
    QueryFeedOperationState state,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum);

    CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions();

    // Prefer a caller-provided correlation activity id; otherwise generate one.
    UUID correlationActivityIdOfRequestOptions = qryOptAccessor
        .getCorrelationActivityId(nonNullQueryOptions);
    UUID correlationActivityId = correlationActivityIdOfRequestOptions != null
        ? correlationActivityIdOfRequestOptions
        : randomUuid();

    final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this,
        getOperationContextAndListenerTuple(nonNullQueryOptions));

    // Retries when the partition is invalidated (e.g. collection recreated with the same name).
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy =
        new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions));

    final ScopedDiagnosticsFactory diagnosticsFactory =
        new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
    state.registerDiagnosticsFactory(
        diagnosticsFactory::reset,
        diagnosticsFactory::merge);

    return ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> createQueryInternal(
                diagnosticsFactory,
                resourceLink,
                sqlQuery,
                state.getQueryOptions(),
                klass,
                resourceTypeEnum,
                queryClient,
                correlationActivityId,
                isQueryCancelledOnTimeout),
            invalidPartitionExceptionRetryPolicy)
        // Merge diagnostics on every terminal path: next, error and cancellation.
        .flatMap(result -> {
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return Mono.just(result);
        })
        .onErrorMap(throwable -> {
            diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot());
            return throwable;
        })
        .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()));
}

/**
 * Creates the document query execution context and, when an end-to-end latency policy is
 * enabled, wraps the resulting Flux with the configured timeout.
 */
private <T> Flux<FeedResponse<T>> createQueryInternal(
    DiagnosticsClientContext diagnosticsClientContext,
    String resourceLink,
    SqlQuerySpec sqlQuery,
    CosmosQueryRequestOptions options,
    Class<T> klass,
    ResourceType resourceTypeEnum,
    IDocumentQueryClient queryClient,
    UUID activityId,
    final AtomicBoolean isQueryCancelledOnTimeout) {

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory
            .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient,
                resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId,
                Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout);

    AtomicBoolean isFirstResponse = new AtomicBoolean(true);
    return executionContext.flatMap(iDocumentQueryExecutionContext -> {
        QueryInfo queryInfo = null;
        if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) {
            queryInfo =
                ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo();
        }

        QueryInfo finalQueryInfo = queryInfo;
        Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync()
            .map(tFeedResponse -> {
                if (finalQueryInfo != null) {
                    if (finalQueryInfo.hasSelectValue()) {
                        ModelBridgeInternal
                            .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo);
                    }
                    // Attach the query-plan diagnostics only to the first page.
                    if (isFirstResponse.compareAndSet(true, false)) {
                        ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse,
                            finalQueryInfo.getQueryPlanDiagnosticsContext());
                    }
                }
                return tFeedResponse;
            });

        RequestOptions requestOptions = options == null
            ? null
            : ImplementationBridgeHelpers
                .CosmosQueryRequestOptionsHelper
                .getCosmosQueryRequestOptionsAccessor()
                .toRequestOptions(options);

        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions);

        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(
                feedResponseFlux,
                endToEndPolicyConfig,
                options,
                isQueryCancelledOnTimeout,
                diagnosticsClientContext);
        }

        return feedResponseFlux;
    }, Queues.SMALL_BUFFER_SIZE, 1);
}

/**
 * Attaches the most relevant diagnostics to a query exception: the most recently created
 * diagnostics if available, otherwise an aggregate of the diagnostics tracked for requests
 * cancelled on timeout.
 */
private static void applyExceptionToMergedDiagnosticsForQuery(
    CosmosQueryRequestOptions requestOptions,
    CosmosException exception,
    DiagnosticsClientContext diagnosticsClientContext) {

    CosmosDiagnostics mostRecentlyCreatedDiagnostics =
        diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

    if (mostRecentlyCreatedDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(
            exception,
            mostRecentlyCreatedDiagnostics);
    } else {
        List<CosmosDiagnostics> cancelledRequestDiagnostics =
            qryOptAccessor
                .getCancelledRequestDiagnosticsTracker(requestOptions);
        // If there is any cancelled request diagnostics - fold them into a single aggregate.
        if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
            CosmosDiagnostics aggregatedCosmosDiagnostics =
                cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);

                        // BUGFIX: the original passed 'first' to both accessor calls, so the
                        // statistics of 'toBeMerged' were merged with themselves and every
                        // element after the first was effectively dropped from the aggregate.
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);

                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics
                                .mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    .get();

            BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
        }
    }
}

/**
 * Applies the end-to-end operation timeout to the feed response Flux. A TimeoutException is
 * translated into a CosmosException carrying merged diagnostics; a negative configured timeout
 * yields a dedicated "negative timeout" exception instead of OperationCancelledException.
 */
private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
    Flux<FeedResponse<T>> feedResponseFlux,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    CosmosQueryRequestOptions requestOptions,
    final AtomicBoolean isQueryCancelledOnTimeout,
    DiagnosticsClientContext diagnosticsClientContext) {

    Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();

    if (endToEndTimeout.isNegative()) {
        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException cancellationException =
                        getNegativeTimeoutException(null, endToEndTimeout);
                    cancellationException.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnosticsForQuery(
                        requestOptions, cancellationException, diagnosticsClientContext);
                    return cancellationException;
                }
                return throwable;
            });
    }

    return feedResponseFlux
        .timeout(endToEndTimeout)
        .onErrorMap(throwable -> {
            if (throwable instanceof TimeoutException) {
                CosmosException exception = new OperationCancelledException();
                exception.setStackTrace(throwable.getStackTrace());
                isQueryCancelledOnTimeout.set(true);
                applyExceptionToMergedDiagnosticsForQuery(
                    requestOptions, exception, diagnosticsClientContext);
                return exception;
            }
            return throwable;
        });
}

@Override
public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
    return queryDatabases(new SqlQuerySpec(query), state);
}

@Override
public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
}

@Override
public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
    DocumentCollection collection, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return
// (continuation of createCollection) run the create through the session-token retry policy.
ObservableHelper.inlineIfPossibleAsObs(
    () -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance),
    retryPolicyInstance);
}

/**
 * Creates a DocumentCollection under the given database link and records serialization
 * diagnostics plus the resulting session token.
 *
 * @param databaseLink parent database link; must be non-empty
 * @param collection collection definition to create; must be non-null
 * @param options request options (may be null)
 * @param retryPolicyInstance retry policy whose onBeforeSendRequest hook is invoked
 * @return Mono emitting the created collection, or an error for invalid arguments/failures
 */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink,
    DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink,
            collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options,
            ResourceType.DocumentCollection, OperationType.Create);

        // Measure payload serialization time for the diagnostics context.
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders,
            options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Capture the session token so session-consistent reads observe this write.
                this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                    getAltLink(resourceResponse.getResource()),
                    resourceResponse.getResponseHeaders());
            });
    } catch (Exception e) {
        logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Replaces an existing DocumentCollection (PUT on its self-link); mirrors
 * createCollectionInternal for serialization diagnostics and session-token capture.
 */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (collection == null) {
            throw new IllegalArgumentException("collection");
        }

        logger.debug("Replacing a Collection. id: [{}]", collection.getId());
        validateResource(collection);

        String path = Utils.joinPath(collection.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options,
            ResourceType.DocumentCollection, OperationType.Replace);

        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer byteBuffer = ModelBridgeInternal.serializeJsonToByteBuffer(collection);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders,
            options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class))
            .doOnNext(resourceResponse -> {
                // Unlike create, replace may return no resource body; guard before reading it.
                if (resourceResponse.getResource() != null) {
                    this.sessionContainer.setSessionToken(resourceResponse.getResource().getResourceId(),
                        getAltLink(resourceResponse.getResource()),
                        resourceResponse.getResponseHeaders());
                }
            });
    } catch (Exception e) {
        logger.debug("Failure in replacing a collection. due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Deletes the collection addressed by the given link. No payload is sent, so there are no
 * serialization diagnostics here.
 */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options,
            ResourceType.DocumentCollection, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Issues a DELETE through the store proxy after populating headers; records the retry end time
 * when this call is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request,
    DocumentClientRetryPolicy documentClientRetryPolicy,
    OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.DELETE)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated)
                .processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}

/**
 * Issues the "delete all items by partition key" POST through the store proxy; records the
 * retry end time when this call is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request,
    DocumentClientRetryPolicy documentClientRetryPolicy,
    OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}

// Point-read (GET) through the store proxy after header population.
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request,
    DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }
            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}

// Feed read (GET) — no retry-context bookkeeping on this path.
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.GET)
        .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
}

// Query (POST); captures the session token from the response before emitting it.
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated ->
            this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                .map(response -> {
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }));
}

@Override
public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
    RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance);
}

/**
 * Reads a single DocumentCollection by link.
 *
 * @param collectionLink link of the collection; must be non-empty
 * @param options request options (may be null)
 * @param retryPolicyInstance retry policy whose onBeforeSendRequest hook is invoked
 * @return Mono emitting the collection, or an error for invalid arguments/failures
 */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink,
    RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options,
            ResourceType.DocumentCollection, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, DocumentCollection.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

@Override
public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink,
    QueryFeedOperationState state) {

    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }

    return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class,
        Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT));
}

@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query,
    QueryFeedOperationState state) {
    return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class,
        ResourceType.DocumentCollection);
}

@Override
public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec,
QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ModelBridgeInternal.toJsonFromJsonSerializable((JsonSerializable) object); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean 
contentResponseOnWriteEnabled = this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if (options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if 
(options.getOfferType() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, ModelBridgeInternal.toJsonFromJsonSerializable(offer.getOfferAutoScaleSettings())); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null) { if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } if 
(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE, String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed())); } } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = 
        // Explicit partition key supplied via the request options.
        BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
    } else if (partitionKeyDefinition == null || partitionKeyDefinition.getPaths().size() == 0) {
        // Migrated/non-partitioned collection: use the empty partition key.
        partitionKeyInternal = PartitionKeyInternal.getEmpty();
    } else if (contentAsByteBuffer != null || objectDoc != null) {
        // Extract the partition key value from the document body itself.
        InternalObjectNode internalObjectNode;
        if (objectDoc instanceof InternalObjectNode) {
            internalObjectNode = (InternalObjectNode) objectDoc;
        } else if (objectDoc instanceof ObjectNode) {
            internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
        } else if (contentAsByteBuffer != null) {
            contentAsByteBuffer.rewind();
            internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
        } else {
            throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
        }

        // Time the extraction for serialization diagnostics.
        Instant serializationStartTime = Instant.now();
        partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode,
            partitionKeyDefinition);
        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTime,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
            );
        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

    } else {
        throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
    }

    request.setPartitionKeyInternal(partitionKeyInternal);
    request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY,
        Utils.escapeNonAscii(partitionKeyInternal.toJson()));
}

/**
 * Builds the RxDocumentServiceRequest for a document write (create/upsert/replace-style
 * operations), serializing the document, stamping headers/diagnostics and resolving the
 * partition key asynchronously.
 *
 * @return Mono emitting the fully prepared request
 */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink, Object document, RequestOptions options,
    boolean disableAutomaticIdGeneration, OperationType operationType,
    DiagnosticsClientContext clientContextOverride) {

    if (StringUtils.isEmpty(documentCollectionLink)) {
        throw new IllegalArgumentException("documentCollectionLink");
    }
    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    Instant serializationStartTimeUTC = Instant.now();
    String trackingId = null;
    if (options != null) {
        trackingId = options.getTrackingId();
    }
    ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, mapper, trackingId);
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document,
        operationType);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        operationType, ResourceType.Document, path, requestHeaders, options, content);

    if (operationType.isWriteOperation() && options != null
        && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if( options != null) {
        // Hook so the e2e-timeout machinery can flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
    return addPartitionKeyInformation(request, content, document, options, collectionObs);
}

/**
 * Builds the RxDocumentServiceRequest for a transactional batch, wrapping the pre-serialized
 * batch body and attaching batch-specific headers once the collection is resolved.
 */
private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
    String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    checkArgument(StringUtils.isNotEmpty(documentCollectionLink),
        "expected non empty documentCollectionLink");
    checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

    Instant serializationStartTimeUTC = Instant.now();
    ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
    Instant serializationEndTimeUTC = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document,
        OperationType.Batch);

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null) {
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (requestRetryPolicy != null) {
        requestRetryPolicy.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // NOTE(review): setExcludeRegions was already applied in the options-null-check above;
    // this second application is redundant (harmless, same value) — candidate for cleanup.
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}

/**
 * Stamps batch routing/headers on the request: a single partition key (or NONE) for
 * single-partition batches, or a partition key range identity for range-scoped batches.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
    ServerBatchRequest serverBatchRequest, DocumentCollection collection) {

    if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {

        PartitionKey partitionKey =
            ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;

        if (partitionKey.equals(PartitionKey.NONE)) {
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            // Convert to internal representation so it can be serialized.
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }

        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY,
            Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(
            ((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST,
Boolean.TRUE.toString()); request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch())); request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError())); request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size()); return request; } /** * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers * @param request request to populate headers to * @param httpMethod http method * @return Mono, which on subscription will populate the headers in the request passed in the argument. */ private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) { request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123()); if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null || this.cosmosAuthorizationTokenResolver != null || this.credential != null) { String resourceName = request.getResourceAddress(); String authorization = this.getUserAuthorizationToken( resourceName, request.getResourceType(), httpMethod, request.getHeaders(), AuthorizationTokenType.PrimaryMasterKey, request.properties); try { authorization = URLEncoder.encode(authorization, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new IllegalStateException("Failed to encode authtoken.", e); } request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization); } if (this.apiType != null) { request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString()); } this.populateCapabilitiesHeader(request); if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod)) && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) { request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON); } if 
(RequestVerb.PATCH.equals(httpMethod)
        && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
    // PATCH uses the JSON-patch media type rather than plain JSON.
    request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
}

if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
    request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
}

MetadataDiagnosticsContext metadataDiagnosticsCtx =
    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

// Feed/query requests scoped to a feed range need extra routing headers before auth.
if (this.requiresFeedRangeFiltering(request)) {
    return request.getFeedRange()
        .populateFeedRangeFilteringHeaders(
            this.getPartitionKeyRangeCache(),
            request,
            this.collectionCache.resolveCollectionAsync(metadataDiagnosticsCtx, request))
        .flatMap(this::populateAuthorizationHeader);
}

return this.populateAuthorizationHeader(request);
}

// Adds the SDK-supported-capabilities header unless the caller already provided one.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        request
            .getHeaders()
            .put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}

// True only for Document/Conflict feed-style operations (ReadFeed/Query/SqlQuery) that carry a feed range.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    if (request.getResourceType() != ResourceType.Document &&
            request.getResourceType() != ResourceType.Conflict) {
        return false;
    }

    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            return request.getFeedRange() != null;
        default:
            return false;
    }
}

/**
 * Populates the AAD authorization header on the request when this client uses AAD tokens;
 * otherwise passes the request through unchanged.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }

    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
            .map(authorization -> {
                request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                return
request;
            });
    } else {
        return Mono.just(request);
    }
}

/**
 * Same as {@link #populateAuthorizationHeader(RxDocumentServiceRequest)} but for a raw
 * {@code HttpHeaders} collection; only mutates the headers when AAD tokens are in use.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }

    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
            .map(authorization -> {
                httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                return httpHeaders;
            });
    }

    return Mono.just(httpHeaders);
}

@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}

/**
 * Resolves the authorization token for a request. Precedence (as implemented below):
 * custom token resolver, then credential-based key signature, then the single
 * master-key/resource-token, then the resource-tokens map.
 *
 * @param resourceName address of the resource being accessed
 * @param resourceType type of the resource
 * @param requestVerb HTTP verb of the request
 * @param headers request headers (consumed by signature generation)
 * @param tokenType requested token type (not consulted by the resolver branch below)
 * @param properties caller-supplied properties passed (read-only) to a custom resolver
 * @return the authorization token string
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {

    if (this.cosmosAuthorizationTokenResolver != null) {
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(),
            resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ?
Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName,
            resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // Single resource token supplied directly by the caller.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if(resourceType.equals(ResourceType.DatabaseAccount)) {
            return this.firstResourceTokenFromPermissionFeed;
        }
        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}

// Maps the service-serialized resource type onto the public enum; unknown values fall back to SYSTEM.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType cosmosResourceType =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    if (cosmosResourceType == null) {
        return CosmosResourceType.SYSTEM;
    }
    return cosmosResourceType;
}

// Records the session token from a response into the session container for later reads.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}

/**
 * POSTs a create request through the store proxy after populating headers.
 * Updates the retry context's end time when this is a retry attempt.
 */
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);

            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}

/**
 * POSTs an upsert request (IS_UPSERT header set) and captures the response's session token.
 */
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {

    return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
            Map<String, String> headers = requestPopulated.getHeaders();
            // headers should never be null, since it is initialized even when no
            // request options are specified
            assert (headers != null);
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                .map(response -> {
                    // Upserts capture the session token; note create/replace/patch below do not do this here.
                    this.captureSessionToken(requestPopulated, response);
                    return response;
                }
            );
        });
}

// PUTs a replace request through the store proxy after populating headers.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}

// Sends a PATCH request through the store proxy after populating headers.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}

/**
 * Public entry point for document creation; wraps the core implementation with the
 * availability strategy (cross-region hedging) and enables write retries only when the
 * caller opted into non-idempotent write retries.
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null &&
options.getNonIdempotentWriteRetriesEnabled()
    );
}

/**
 * Core create path: wires up the scoped diagnostics factory and retry policy (adding a
 * partition-key-mismatch retry layer when no PK was supplied), then runs the internal
 * create under the end-to-end timeout policy.
 */
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Without an explicit PK the server may report a PK mismatch; retry via a dedicated policy.
        requestRetryPolicy =
            new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }

    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}

// Builds the create request and executes it, mapping the raw response to a typed resource response.
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);

        return requestObs
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        // Synchronous failures while building the request surface as an error Mono.
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Applies the end-to-end operation timeout (if configured and enabled) to a point-operation Mono.
 * A negative timeout short-circuits with an OperationCancelledException carrying the
 * NEGATIVE_TIMEOUT_PROVIDED substatus; otherwise timeouts are mapped into cancellation exceptions.
 */
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);

    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Ensure at least one diagnostics instance exists before attaching it to the error.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot =
                scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }

        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }
    return rxDocumentServiceResponseMono;
}

// Maps a reactor TimeoutException into an OperationCancelledException with diagnostics attached;
// other throwables pass through unchanged.
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {

    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if
(unwrappedException instanceof TimeoutException) {
        CosmosException exception = new OperationCancelledException();
        exception.setStackTrace(throwable.getStackTrace());

        // Let the in-flight request know the operation was cancelled due to the E2E timeout.
        Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
        if (actualCallback != null) {
            logger.trace("Calling actual Mark E2E timeout callback");
            actualCallback.run();
        }

        // Attach the most recent diagnostics snapshot (creating one if none exists yet).
        CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
        if (lastDiagnosticsSnapshot == null) {
            scopedDiagnosticsFactory.createDiagnostics();
        }
        BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
        return exception;
    }
    return throwable;
}

/**
 * Builds the OperationCancelledException used when a caller configures a negative
 * end-to-end timeout. Only valid for negative durations (enforced via checkArgument).
 */
private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
    checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
    checkArgument(
        negativeTimeout.isNegative(),
        "This exception should only be used for negative timeouts");
    String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
    CosmosException exception = new OperationCancelledException(message, null);
    BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
    if (cosmosDiagnostics != null) {
        BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
    }

    return exception;
}

/**
 * Public entry point for document upsert; wraps the core implementation with the
 * availability strategy, mirroring {@code createDocument}.
 */
@Override
public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                       RequestOptions options, boolean disableAutomaticIdGeneration) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Upsert,
        (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
            collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> upsertDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions
options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    // Core upsert path: mirrors createDocumentCore (scoped diagnostics + PK-mismatch retry
    // when no partition key was supplied + E2E timeout wrapping).
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        requestRetryPolicy =
            new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}

// Builds the upsert request and executes it via upsert(), mapping to a typed resource response.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(
            retryPolicyInstance,
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            OperationType.Upsert,
            clientContextOverride);

        return reqObs
            .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Public entry point for replacing a document addressed by link; wraps the core
 * implementation with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink, document, opt, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
    // Core replace path for link-addressed documents: scoped diagnostics + PK-mismatch
    // retry (collection name derived from the document link) + E2E timeout wrapping.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        String collectionLink = Utils.getCollectionName(documentLink);
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }

    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceDocumentInternal(
                documentLink,
                document,
                nonNullRequestOptions,
                finalRequestRetryPolicy,
                endToEndPolicyConfig,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}

// Validates arguments, converts the raw object into a typed Document, then delegates to the
// Document-typed internal overload.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Object document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Document typedDocument = documentFromObject(document, mapper);

        return this.replaceDocumentInternal(
            documentLink, typedDocument, options, retryPolicyInstance, clientContextOverride);

    } catch (Exception e) {
        logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

/**
 * Public entry point for replacing a self-link-addressed Document instance; wraps the core
 * implementation with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            document, opt, e2ecfg,
clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> replaceDocumentCore( Document document, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( document, options, finalRequestRetryPolicy, endToEndPolicyConfig, clientContextOverride), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal( Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal( document.getSelfLink(), document, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); if (options != null) { String trackingId = options.getTrackingId(); if (trackingId != null && !trackingId.isEmpty()) { document.set(Constants.Properties.TRACKING_ID, trackingId); } } ByteBuffer content = serializeJsonToByteBuffer(document); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); 
Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);

    return requestObs
        // NOTE(review): the lambda ignores its 'req' parameter and reuses the captured 'request';
        // addPartitionKeyInformation appears to mutate and return the same instance, so this is
        // presumably equivalent — verify before "fixing".
        .flatMap(req -> replace(request, retryPolicyInstance)
            .map(resp -> toResourceResponse(resp, Document.class)));
}

// Resolves the effective E2E latency policy from request options, falling back to the client default.
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(RequestOptions options) {
    return this.getEffectiveEndToEndOperationLatencyPolicyConfig(
        options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null);
}

// Explicit policy wins; otherwise use the client-level default policy.
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
    CosmosEndToEndOperationLatencyPolicyConfig policyConfig) {
    return policyConfig != null ? policyConfig : this.cosmosEndToEndOperationLatencyPolicyConfig;
}

/**
 * Public entry point for patching a document; wraps the core implementation with the
 * availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                      CosmosPatchOperations cosmosPatchOperations,
                                                      RequestOptions options) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Patch,
        (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
            documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> patchDocumentCore(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
    // Core patch path: scoped diagnostics + E2E timeout wrapping (no PK-mismatch retry layer here,
    // unlike create/upsert/replace).
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                nonNullRequestOptions,
                documentClientRetryPolicy,
                scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}

/**
 * Builds and prepares the PATCH request: serializes the patch operations (recording
 * serialization diagnostics), wires up E2E-timeout cancellation and region exclusions,
 * resolves the target collection and adds partition-key information before executing.
 */
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);

    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    Instant serializationStartTimeUTC = Instant.now();

    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

    Instant serializationEndTime = Instant.now();

    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook lets the E2E-timeout machinery mark this request as cancelled on timeout.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    // Patch carries no document body for PK extraction, hence the null content/document arguments.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request, null, null, options, collectionObs);

    return requestObs
        // NOTE(review): the lambda ignores 'req' and reuses the captured 'request' — presumably
        // the same mutated instance; verify before changing.
        .flatMap(req -> patch(request,
retryPolicyInstance))
        .map(resp -> toResourceResponse(resp, Document.class));
}

/**
 * Public entry point for deleting a document by link (no cached document body available);
 * wraps the core implementation with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink, null, opt, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

/**
 * Delete overload that also carries the known document body (used for partition-key extraction
 * downstream); wraps the core implementation with the availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Delete,
        (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
            documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

private Mono<ResourceResponse<Document>> deleteDocumentCore(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
    // Core delete path: scoped diagnostics + E2E timeout wrapping.
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteDocumentInternal(
                documentLink,
                internalObjectNode,
                nonNullRequestOptions,
                requestRetryPolicy,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}

/**
 * Builds and prepares the delete request: wires up E2E-timeout cancellation and region
 * exclusions, resolves the target collection and adds partition-key information
 * (using the supplied document body, if any) before executing.
 */
private Mono<ResourceResponse<Document>> deleteDocumentInternal(
    String documentLink,
    InternalObjectNode internalObjectNode,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (StringUtils.isEmpty(documentLink)) {
            throw new IllegalArgumentException("documentLink");
        }

        logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
        String path = Utils.joinPath(documentLink, null);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            OperationType.Delete, ResourceType.Document, path, requestHeaders, options);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            // Hook lets the E2E-timeout machinery mark this request as cancelled on timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, internalObjectNode, options, collectionObs);

        return requestObs
            .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

/**
 * Public entry point for deleting all documents under a partition key.
 * Note: no availability-strategy wrapping and no scoped diagnostics factory here,
 * unlike the other point operations; retry policy is created with a null diagnostics context.
 */
@Override
public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
        requestRetryPolicy);
}

// Builds a PartitionKey-resource delete request and executes it via deleteAllItemsByPartitionKey.
// The partition key itself is conveyed through 'options' (added by addPartitionKeyInformation),
// which is why the PartitionKey parameter of the public method is not passed down here.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
        String path = Utils.joinPath(collectionLink, null);
        Map<String, String> requestHeaders =
            this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

        return requestObs.flatMap(req -> this
            .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
    } catch (Exception e) {
        logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

@Override
public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
    // Public read entry point; uses this client as the diagnostics factory.
    return readDocument(documentLink, options, this);
}

// Read entry point allowing an alternate diagnostics factory; availability strategy is applied
// with non-idempotent-write-retries forced off (reads are idempotent).
private Mono<ResourceResponse<Document>> readDocument(
    String documentLink,
    RequestOptions options,
    DiagnosticsClientContext innerDiagnosticsFactory) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Read,
        (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
        options,
        false,
        innerDiagnosticsFactory
    );
}

private Mono<ResourceResponse<Document>> readDocumentCore(
    String documentLink,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();

        // Scoped factory so diagnostics captured across retries can be merged back
        // into a single context for this read operation.
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

        // Wrap the point read with the end-to-end latency policy timeout (if configured).
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> readDocumentInternal(
                    documentLink,
                    nonNullRequestOptions,
                    retryPolicyInstance,
                    scopedDiagnosticsFactory),
                retryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }

    /**
     * Core single-document read: builds the service request, wires timeout
     * cancellation and excluded regions, resolves the collection, adds
     * partition-key information and issues the read.
     *
     * @param documentLink          link of the document to read; must be non-empty
     * @param options               request options (carries the E2E-timeout callback hook)
     * @param retryPolicyInstance   retry policy bound to this operation (may be null)
     * @param clientContextOverride diagnostics context to attribute this request to
     * @return Mono emitting the read document response, or an error
     */
    private Mono<ResourceResponse<Document>> readDocumentInternal(
        String documentLink,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }
            logger.debug("Reading a Document. documentLink: [{}]", documentLink);

            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Read, ResourceType.Document, path, requestHeaders, options);

            // Hook lets the E2E-timeout machinery mark this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
                this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

            // NOTE(review): the lambda calls this.read(request, ...) rather than
            // this.read(req, ...); addPartitionKeyInformation appears to emit the same
            // mutated request instance, so both names resolve to the same object - confirm.
            return requestObs.flatMap(req -> this.read(request,
retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public <T> Flux<FeedResponse<T>> readDocuments( String collectionLink, QueryFeedOperationState state, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, QueryFeedOperationState state, Class<T> klass) { final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true); state.registerDiagnosticsFactory( () -> {}, (ctx) -> diagnosticsFactory.merge(ctx) ); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono .flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = 
collectionRoutingMapValueHolder.v; if (routingMap == null) { return Mono.error(new IllegalStateException("Failed to get routing map.")); } itemIdentityList .forEach(itemIdentity -> { if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) && ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey()) .getComponents().size() != pkDefinition.getPaths().size()) { throw new IllegalArgumentException(RMResources.PartitionKeyMismatch); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); Flux<FeedResponse<Document>> pointReads = pointReadsForReadMany( diagnosticsFactory, partitionRangeItemKeyMap, resourceLink, state.getQueryOptions(), klass); Flux<FeedResponse<Document>> queries = queryForReadMany( diagnosticsFactory, resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), state.getQueryOptions(), Document.class, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)); return Flux.merge(pointReads, queries) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection(); double requestCharge = 0; for 
(FeedResponse<Document> page : feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults().stream().map(document -> ModelBridgeInternal.toObjectFromJsonSerializable(document, klass)).collect(Collectors.toList())); aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics())); } CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics); diagnosticsAccessor.addClientSideDiagnosticsToFeed( aggregatedDiagnostics, aggregateRequestStatistics); state.mergeDiagnosticsContext(); CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); if (ctx != null) { ctxAccessor.recordOperation( ctx, 200, 0, finalList.size(), requestCharge, aggregatedDiagnostics, null ); diagnosticsAccessor .setDiagnosticsContext( aggregatedDiagnostics, ctx); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponseWithQueryMetrics( finalList, headers, aggregatedQueryMetrics, null, false, false, aggregatedDiagnostics); return frp; }); }) .onErrorMap(throwable -> { if (throwable instanceof CosmosException) { CosmosException cosmosException = (CosmosException)throwable; CosmosDiagnostics diagnostics = cosmosException.getDiagnostics(); if (diagnostics != null) { state.mergeDiagnosticsContext(); CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); if (ctx != null) { ctxAccessor.recordOperation( ctx, cosmosException.getStatusCode(), cosmosException.getSubStatusCode(), 0, cosmosException.getRequestCharge(), diagnostics, throwable ); diagnosticsAccessor .setDiagnosticsContext( diagnostics, state.getDiagnosticsContextSnapshot()); } } return cosmosException; } return throwable; 
}); } ); }

    /**
     * For each partition-key range that owns more than one requested item, builds the
     * SQL query used by the readMany query path. Ranges holding exactly one item are
     * omitted here; the visible caller serves the single-item case via point reads.
     *
     * @param partitionRangeItemKeyMap items grouped by owning partition-key range
     * @param partitionKeyDefinition   the collection's partition-key definition
     * @return map of range -> query spec (only for ranges with 2+ items)
     */
    private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
        PartitionKeyDefinition partitionKeyDefinition) {

        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
        String partitionKeySelector = createPkSelector(partitionKeyDefinition);

        for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {
            SqlQuerySpec sqlQuerySpec;
            List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue();
            if (cosmosItemIdentityList.size() > 1) {
                // When the partition key is the id itself, an IN-list on c.id suffices.
                if (partitionKeySelector.equals("[\"id\"]")) {
                    sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(cosmosItemIdentityList, partitionKeySelector);
                } else if (partitionKeyDefinition.getKind().equals(PartitionKind.MULTI_HASH)) {
                    // Hierarchical (multi-hash) keys need one predicate per sub-path.
                    sqlQuerySpec = createReadManyQuerySpecMultiHash(entry.getValue(), partitionKeyDefinition);
                } else {
                    sqlQuerySpec = createReadManyQuerySpec(cosmosItemIdentityList, partitionKeySelector);
                }
                rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
            }
        }
        return rangeQueryMap;
    }

    /**
     * Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" for collections whose
     * partition key is the id. All values are passed as SQL parameters, never
     * concatenated into the query text.
     */
    private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(
        List<CosmosItemIdentity> idPartitionKeyPairList,
        String partitionKeySelector) {

        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
        for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
            CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + i;
            parameters.add(new SqlParameter(idParamName, idValue));
            queryStringBuilder.append(idParamName);

            if (i < idPartitionKeyPairList.size() - 1) {
                queryStringBuilder.append(", ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    /**
     * Builds an OR-of-(id AND pk) query for single-path partition keys.
     * Parameters are interleaved: even index = partition key, odd index = id.
     */
    private SqlQuerySpec createReadManyQuerySpec(List<CosmosItemIdentity> itemIdentities, String partitionKeySelector) {
        StringBuilder
queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE ( ");
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);

            PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
            Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
            // Even-numbered parameter carries the partition-key value ...
            String pkParamName = "@param" + (2 * i);
            parameters.add(new SqlParameter(pkParamName, pkValue));

            String idValue = itemIdentity.getId();
            // ... odd-numbered parameter carries the id.
            String idParamName = "@param" + (2 * i + 1);
            parameters.add(new SqlParameter(idParamName, idValue));

            // Emits: (c.id = @idParam AND  c<pkSelector> = @pkParam )
            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);
            queryStringBuilder.append(" AND ");
            queryStringBuilder.append(" c");
            queryStringBuilder.append(partitionKeySelector);
            queryStringBuilder.append((" = "));
            queryStringBuilder.append(pkParamName);
            queryStringBuilder.append(" )");

            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    /**
     * Builds the readMany query for hierarchical (multi-hash) partition keys: one
     * equality predicate per sub-partition-key path, AND-ed with the id match and
     * OR-ed across items. Parameter names are numbered sequentially via paramCount.
     */
    private SqlQuerySpec createReadManyQuerySpecMultiHash(
        List<CosmosItemIdentity> itemIdentities,
        PartitionKeyDefinition partitionKeyDefinition) {

        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE ( ");
        int paramCount = 0;
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);

            PartitionKey pkValueAsPartitionKey = itemIdentity.getPartitionKey();
            Object pkValue = ModelBridgeInternal.getPartitionKeyObject(pkValueAsPartitionKey);
            // NOTE(review): assumes the multi-hash PK value is a String of the form
            // "v1=v2=..." split on '=' - sub-values containing '=' would be split
            // incorrectly; confirm upstream guarantees this encoding.
            String pkValueString = (String) pkValue;
            List<List<String>> partitionKeyParams = new ArrayList<>();
            List<String> paths = partitionKeyDefinition.getPaths();
            int pathCount = 0;
            for (String subPartitionKey: pkValueString.split("=")) {
                String pkParamName =
"@param" + paramCount;
                // Pair each sub-partition-key path with its parameter name.
                partitionKeyParams.add(Arrays.asList(paths.get(pathCount), pkParamName));
                parameters.add(new SqlParameter(pkParamName, subPartitionKey));
                paramCount++;
                pathCount++;
            }

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + paramCount;
            paramCount++;
            parameters.add(new SqlParameter(idParamName, idValue));

            // Emits: (c.id = @idParam AND c.<path1> = @p1 AND c.<path2> = @p2 ... )
            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);
            for (List<String> pkParam: partitionKeyParams) {
                queryStringBuilder.append(" AND ");
                queryStringBuilder.append(" c.");
                // substring(1) drops the leading '/' of the partition-key path.
                queryStringBuilder.append(pkParam.get(0).substring(1));
                queryStringBuilder.append((" = "));
                queryStringBuilder.append(pkParam.get(1));
            }
            queryStringBuilder.append(" )");

            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    /**
     * Converts the partition-key definition paths into a bracketed selector such as
     * ["pk"] (segments concatenated for hierarchical keys), used to address
     * c["pk"] in generated queries.
     * NOTE(review): a '"' inside a path is replaced with a lone backslash rather than
     * an escaped quote - looks suspicious for paths containing quotes; confirm intent.
     */
    private String createPkSelector(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            .map(pathPart -> StringUtils.substring(pathPart, 1))
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.joining());
    }

    /**
     * Executes the query half of readMany: runs one query per partition-key range in
     * rangeQueryMap, honoring the end-to-end latency policy timeout when enabled.
     * Returns an empty Flux when there is nothing to query.
     */
    private <T extends Resource> Flux<FeedResponse<T>> queryForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DocumentCollection collection,
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

        if (rangeQueryMap.isEmpty()) {
            return Flux.empty();
        }

        UUID activityId = randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

        Flux<?
extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync( diagnosticsFactory, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum, isQueryCancelledOnTimeout); Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .toRequestOptions(options); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(requestOptions); if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) { return getFeedResponseFluxWithTimeout( feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout, diagnosticsFactory); } return feedResponseFlux; } private <T> Flux<FeedResponse<Document>> pointReadsForReadMany( ScopedDiagnosticsFactory diagnosticsFactory, Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap, String resourceLink, CosmosQueryRequestOptions queryRequestOptions, Class<T> klass) { return Flux.fromIterable(singleItemPartitionRequestMap.values()) .flatMap(cosmosItemIdentityList -> { if (cosmosItemIdentityList.size() == 1) { CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0); RequestOptions requestOptions = ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .toRequestOptions(queryRequestOptions); requestOptions.setPartitionKey(firstIdentity.getPartitionKey()); return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory) .flatMap(resourceResponse -> Mono.just( new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null) )) .onErrorResume(throwable -> { Throwable 
unwrappedThrowable = Exceptions.unwrap(throwable); if (unwrappedThrowable instanceof CosmosException) { CosmosException cosmosException = (CosmosException) unwrappedThrowable; int statusCode = cosmosException.getStatusCode(); int subStatusCode = cosmosException.getSubStatusCode(); if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) { return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException)); } } return Mono.error(unwrappedThrowable); }); } return Mono.empty(); }) .flatMap(resourceResponseToExceptionPair -> { ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft(); CosmosException cosmosException = resourceResponseToExceptionPair.getRight(); FeedResponse<Document> feedResponse; if (cosmosException != null) { feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders()); diagnosticsAccessor.addClientSideDiagnosticsToFeed( feedResponse.getCosmosDiagnostics(), Collections.singleton( BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics()))); } else { CosmosItemResponse<T> cosmosItemResponse = ModelBridgeInternal.createCosmosAsyncItemResponse(resourceResponse, klass, getItemDeserializer()); feedResponse = ModelBridgeInternal.createFeedResponse( Arrays.asList(InternalObjectNode.fromObject(cosmosItemResponse.getItem())), cosmosItemResponse.getResponseHeaders()); diagnosticsAccessor.addClientSideDiagnosticsToFeed( feedResponse.getCosmosDiagnostics(), Collections.singleton( BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics()))); } return Mono.just(feedResponse); }); } @Override public <T> Flux<FeedResponse<T>> queryDocuments( String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) { return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT); } private IDocumentQueryClient 
documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {
        // Anonymous adapter exposing this client's caches, retry policies and query
        // execution to the query pipeline via IDocumentQueryClient.
        return new IDocumentQueryClient () {

            @Override
            public RxCollectionCache getCollectionCache() {
                return RxDocumentClientImpl.this.collectionCache;
            }

            @Override
            public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
                return RxDocumentClientImpl.this.partitionKeyRangeCache;
            }

            @Override
            public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
                return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
            }

            @Override
            public ConsistencyLevel getDefaultConsistencyLevelAsync() {
                // Account-level default consistency, read from the gateway configuration.
                return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
            }

            @Override
            public ConsistencyLevel getDesiredConsistencyLevelAsync() {
                // Consistency level configured on this client instance.
                return RxDocumentClientImpl.this.consistencyLevel;
            }

            @Override
            public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
                if (operationContextAndListenerTuple == null) {
                    return RxDocumentClientImpl.this.query(request).single();
                } else {
                    // Listener path: tag the request with the correlation activity id
                    // and notify the operation listener around the actual call.
                    final OperationListener listener =
                        operationContextAndListenerTuple.getOperationListener();
                    final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                    request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                    listener.requestListener(operationContext, request);

                    return RxDocumentClientImpl.this.query(request).single().doOnNext(
                        response -> listener.responseListener(operationContext, response)
                    ).doOnError(
                        ex -> listener.exceptionListener(operationContext, ex)
                    );
                }
            }

            @Override
            public QueryCompatibilityMode getQueryCompatibilityMode() {
                return QueryCompatibilityMode.Default;
            }

            @Override
            public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
                ResourceType resourceType,
                OperationType operationType,
                Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
                RxDocumentServiceRequest req,
BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) { return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy( resourceType, operationType, retryPolicyFactory, req, feedOperation ); } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public <T> Flux<FeedResponse<T>> queryDocuments( String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state, Class<T> classOfT) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document); } @Override public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions, Class<T> classOfT) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, classOfT, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) { return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT); } @Override public <T> Flux<FeedResponse<T>> readAllDocuments( String collectionLink, PartitionKey partitionKey, QueryFeedOperationState state, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey == null) { throw new IllegalArgumentException("partitionKey"); } final CosmosQueryRequestOptions effectiveOptions = qryOptAccessor.clone(state.getQueryOptions()); RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = 
nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig(); List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation( endToEndPolicyConfig, ResourceType.Document, OperationType.Query, false, nonNullRequestOptions); ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false); if (orderedApplicableRegionsForSpeculation.size() < 2) { state.registerDiagnosticsFactory( () -> {}, (ctx) -> diagnosticsFactory.merge(ctx)); } else { state.registerDiagnosticsFactory( () -> diagnosticsFactory.reset(), (ctx) -> diagnosticsFactory.merge(ctx)); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create( diagnosticsFactory, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); String pkSelector = createPkSelector(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, pkSelector); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions())); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions)); Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs( () -> { 
Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { return Mono.error(new IllegalStateException("Failed to get routing map.")); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( diagnosticsFactory, resourceLink, querySpec, ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()), classOfT, ResourceType.Document, queryClient, activityId, isQueryCancelledOnTimeout); }); }, invalidPartitionExceptionRetryPolicy); if (orderedApplicableRegionsForSpeculation.size() < 2) { return innerFlux; } return innerFlux .flatMap(result -> { diagnosticsFactory.merge(nonNullRequestOptions); return Mono.just(result); }) .onErrorMap(throwable -> { diagnosticsFactory.merge(nonNullRequestOptions); return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions)); }); } @Override public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<PartitionKeyRange>> 
readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        // Feed-read of the collection's partition-key ranges.
        return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
            Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
    }

    /**
     * Builds the service request for a stored-procedure operation.
     * Validates arguments and the resource before constructing the request.
     *
     * @throws IllegalArgumentException when collectionLink is empty or storedProcedure is null
     */
    private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink,
                                                               StoredProcedure storedProcedure,
                                                               RequestOptions options,
                                                               OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }

        validateResource(storedProcedure);

        String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
        return RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
    }

    /**
     * Builds the service request for a user-defined-function operation; mirrors
     * getStoredProcedureRequest for the UDF resource type.
     *
     * @throws IllegalArgumentException when collectionLink is empty or udf is null
     */
    private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink,
                                                                   UserDefinedFunction udf,
                                                                   RequestOptions options,
                                                                   OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }

        validateResource(udf);

        String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
        return RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
    }

    @Override
    public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                         StoredProcedure storedProcedure, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy =
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. 
storedProcedure id [{}]", storedProcedure.getId());
            RxDocumentClientImpl.validateResource(storedProcedure);
            String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
            // Let the retry policy observe/decorate the request before it goes on the wire.
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
        } catch (Exception e) {
            // Synchronous failures are surfaced through the reactive pipeline rather than thrown.
            logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Deletes a stored procedure by link. Work is delegated to
     * {@link #deleteStoredProcedureInternal}; retries are driven by the session-token-reset policy.
     */
    @Override
    public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
    }

    // Builds and issues the Delete request for a stored procedure; argument problems surface as Mono.error, not a throw.
    private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(storedProcedureLink)) {
                throw new IllegalArgumentException("storedProcedureLink");
            }
            logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads a stored procedure by link; delegates to {@link #readStoredProcedureInternal}. */
    @Override
    public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options,
                                                                                DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(storedProcedureLink)) {
                throw new IllegalArgumentException("storedProcedureLink");
            }
            logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads the stored-procedure feed of a collection. Argument validation throws eagerly here (no Mono.error wrapping). */
    @Override
    public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, QueryFeedOperationState state) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class,
            Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
    }

    /** Query overload taking raw SQL text; wraps it in a {@link SqlQuerySpec} and forwards. */
    @Override
    public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, QueryFeedOperationState state) {
        return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
    }

    /** Executes a stored procedure with the given parameters; see executeStoredProcedureInternal for the request shape. */
    @Override
    public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, 
documentClientRetryPolicy), documentClientRetryPolicy);
    }

    /** Executes a server batch request; retries are driven by the session-token-reset policy. */
    @Override
    public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                         ServerBatchRequest serverBatchRequest,
                                                         RequestOptions options,
                                                         boolean disableAutomaticIdGeneration) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy);
    }

    /**
     * Issues the ExecuteJavaScript request for a stored procedure. Parameters are serialized
     * into the request body via serializeProcedureParams (empty string when none are given);
     * the session token from the response is captured before mapping to the public response type.
     */
    private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options,
                                                                         List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {
        try {
            logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
            requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path,
                procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
                requestHeaders, options);
            if (options != null) {
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }
            if (retryPolicy != null) {
                retryPolicy.onBeforeSendRequest(request);
            }
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            // FIX: use the request emitted by addPartitionKeyInformation ("req") instead of the
            // captured outer "request" — the lambda parameter was previously ignored, which would
            // silently drop partition-key decoration if the helper ever returned a new instance.
            // (Today the helper returns the same object, so behavior is unchanged.)
            return reqObs.flatMap(req ->
                create(req, retryPolicy, getOperationContextAndListenerTuple(options))
                    .map(response -> {
                        this.captureSessionToken(req, response);
                        return toStoredProcedureResponse(response);
                    }));
        } catch (Exception e) {
            logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Sends a server batch request and parses the service response into a CosmosBatchResponse.
    private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                                  ServerBatchRequest serverBatchRequest,
                                                                  RequestOptions options,
                                                                  DocumentClientRetryPolicy requestRetryPolicy,
                                                                  boolean disableAutomaticIdGeneration) {
        try {
            logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());
            Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
            Mono<RxDocumentServiceResponse> responseObservable =
                requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));
            return responseObservable
                .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
        } catch (Exception ex) {
            logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
            return Mono.error(ex);
        }
    }

    /** Creates a trigger under a collection; delegates to createTriggerInternal. */
    @Override
    public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, 
trigger, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Creates a trigger; request construction/validation lives in getTriggerRequest, failures surface as Mono.error.
    private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options,
                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId());
            RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Shared builder for trigger requests; validates arguments eagerly (throws, unlike the *Internal methods).
    private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                       OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        RxDocumentClientImpl.validateResource(trigger);
        String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
        return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options);
    }

    @Override
    public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Replaces a trigger addressed by its self-link.
    private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                                   DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (trigger == null) {
                throw new IllegalArgumentException("trigger");
            }
            logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
            RxDocumentClientImpl.validateResource(trigger);
            String path = Utils.joinPath(trigger.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options,
                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(triggerLink)) {
                throw new IllegalArgumentException("triggerLink");
            }
            logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
            String path = Utils.joinPath(triggerLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(triggerLink)) {
                throw new IllegalArgumentException("triggerLink");
            }
            logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
            String path = Utils.joinPath(triggerLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads the trigger feed of a collection. */
    @Override
    public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class,
            Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
    }

    /** Raw-SQL overload; wraps the text in a {@link SqlQuerySpec}. */
    @Override
    public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, QueryFeedOperationState state) {
        return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
    }

    @Override
    public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction 
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId());
            RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Replaces a UDF addressed by its self-link.
    private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options,
                                                                                           DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (udf == null) {
                throw new IllegalArgumentException("udf");
            }
            logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
            validateResource(udf);
            String path = Utils.joinPath(udf.getSelfLink(), null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options,
                                                                                          DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(udfLink)) {
                throw new IllegalArgumentException("udfLink");
            }
            logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
            String path = Utils.joinPath(udfLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
            if (retryPolicyInstance != null){
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options,
                                                                                        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(udfLink)) {
                throw new IllegalArgumentException("udfLink");
            }
            logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
            String path = Utils.joinPath(udfLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads the UDF feed of a collection. */
    @Override
    public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, QueryFeedOperationState state) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
            Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
    }

    /** Raw-SQL overload; wraps the text in a {@link SqlQuerySpec}. */
    @Override
    public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
        String collectionLink,
        String query,
        QueryFeedOperationState state) {
        return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
        String collectionLink,
        SqlQuerySpec querySpec,
        QueryFeedOperationState state) {
        return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
    }

    @Override
    public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private 
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, QueryFeedOperationState state) { return queryConflicts(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { 
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
            // NOTE(review): unlike the sibling upsert/replace/delete paths, no
            // onBeforeSendRequest call happens here before issuing the request — confirm intentional.
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Shared builder for Create/Upsert user requests; validates arguments eagerly (throws).
    private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
        return RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options);
    }

    @Override
    public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Replaces a user addressed by its self-link.
    private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options,
                                                             DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (user == null) {
                throw new IllegalArgumentException("user");
            }
            logger.debug("Replacing a User. user id [{}]", user.getId());
            RxDocumentClientImpl.validateResource(user);
            String path = Utils.joinPath(user.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // NOTE(review): missing @Override, unlike every sibling public method here — likely an oversight.
    public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(userLink)) {
                throw new IllegalArgumentException("userLink");
            }
            logger.debug("Deleting a User. userLink [{}]", userLink);
            String path = Utils.joinPath(userLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.User, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options,
                                                          DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(userLink)) {
                throw new IllegalArgumentException("userLink");
            }
            logger.debug("Reading a User. userLink [{}]", userLink);
            String path = Utils.joinPath(userLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.User, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads the user feed of a database. */
    @Override
    public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        return nonDocumentReadFeed(state, ResourceType.User, User.class,
            Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
    }

    /** Raw-SQL overload; wraps the text in a {@link SqlQuerySpec}. */
    @Override
    public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
        return queryUsers(databaseLink, new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
    }

    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options,
                                                                                        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if 
(StringUtils.isEmpty(clientEncryptionKeyLink)) {
                throw new IllegalArgumentException("clientEncryptionKeyLink");
            }
            logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
            String path = Utils.joinPath(clientEncryptionKeyLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
    }

    // Creates a client encryption key; note no onBeforeSendRequest call here, mirroring createUserInternal.
    private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                                                          DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
            RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Shared builder for client-encryption-key requests; validates arguments eagerly (throws).
    private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options,
                                                                   OperationType operationType) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
        return RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
    }

    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy 
retryPolicyInstance) { try { if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys( String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys( String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> 
createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null)); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, QueryFeedOperationState state) { return queryPermissions(userLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing 
an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( QueryFeedOperationState state, ResourceType resourceType, Class<T> klass, String resourceLink) { return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy), retryPolicy); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink, DocumentClientRetryPolicy retryPolicy) { final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions(); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions); int maxPageSize = maxItemCount != null ? 
maxItemCount : -1;
// This helper must not be used for documents (they go through a different path).
assert(resourceType != ResourceType.Document);
// Builds one ReadFeed page request, carrying the continuation token and page size headers.
BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> {
    Map<String, String> requestHeaders = new HashMap<>();
    if (continuationToken != null) {
        requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken);
    }
    requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize));
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
        OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions);
    retryPolicy.onBeforeSendRequest(request);
    return request;
};
// Executes a page request and maps the raw response into a typed feed page.
Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> readFeed(request)
    .map(response -> toFeedResponsePage(
        response,
        ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .getItemFactoryMethod(nonNullOptions, klass),
        klass));
return Paginator
    .getPaginatedQueryResultAsObservable(
        nonNullOptions, createRequestFunc, executeFunc, maxPageSize);
}

/** Queries offers with a raw query string. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) {
    return queryOffers(new SqlQuerySpec(query), state);
}

/** Queries offers with a parameterized query spec. */
@Override
public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer);
}

/** Reads the database account, wrapped with the session-token-reset retry policy. */
@Override
public Mono<DatabaseAccount> getDatabaseAccount() {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy);
}

private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Getting Database Account");
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read,
            ResourceType.DatabaseAccount,
            "",
            (HashMap<String, String>) null,
            null);
        return this.read(request, documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount);
    } catch (Exception e) {
        logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

public Object getSession() {
    return this.sessionContainer;
}

public void setSession(Object sessionContainer) {
    this.sessionContainer = (SessionContainer) sessionContainer;
}

@Override
public RxClientCollectionCache getCollectionCache() {
    return this.collectionCache;
}

@Override
public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
    return partitionKeyRangeCache;
}

@Override
public GlobalEndpointManager getGlobalEndpointManager() {
    return this.globalEndpointManager;
}

@Override
public AddressSelector getAddressSelector() {
    return new AddressSelector(this.addressResolver, this.configs.getProtocol());
}

// Reads the database account from a specific endpoint (bypassing normal endpoint
// selection) and records whether multiple write locations may be used.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) {
    return Flux.defer(() -> {
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null);
        return this.populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                requestPopulated.setEndpointOverride(endpoint);
                return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> {
                    String message = String.format("Failed to retrieve database account information. %s",
                        e.getCause() != null
                            ? e.getCause().toString()
                            : e.toString());
                    logger.warn(message);
                }).map(rsp -> rsp.getResource(DatabaseAccount.class))
                    .doOnNext(databaseAccount ->
                        this.useMultipleWriteLocations =
                            this.connectionPolicy.isMultipleWriteRegionsEnabled()
                                && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount));
            });
    });
}

/**
 * Certain requests must be routed through gateway even when the client connectivity mode is direct.
 *
 * @param request
 * @return RxStoreModel
 */
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) {
    // Explicit per-request override wins.
    if (request.useGatewayMode) {
        return this.gatewayProxy;
    }
    ResourceType resourceType = request.getResourceType();
    OperationType operationType = request.getOperationType();
    // These resource/operation combinations are always served by the gateway.
    if (resourceType == ResourceType.Offer
        || resourceType == ResourceType.ClientEncryptionKey
        || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript
        || resourceType == ResourceType.PartitionKeyRange
        || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) {
        return this.gatewayProxy;
    }
    if (operationType == OperationType.Create
        || operationType == OperationType.Upsert) {
        if (resourceType == ResourceType.Database
            || resourceType == ResourceType.User
            || resourceType == ResourceType.DocumentCollection
            || resourceType == ResourceType.Permission) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Delete) {
        if (resourceType == ResourceType.Database
            || resourceType == ResourceType.User
            || resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Replace) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else if (operationType == OperationType.Read) {
        if (resourceType == ResourceType.DocumentCollection) {
            return this.gatewayProxy;
        } else {
            return this.storeModel;
        }
    } else {
        // Feed/query operations under a collection without a resolved partition key
        // (range) are routed through the gateway.
        if ((operationType == OperationType.Query
            || operationType == OperationType.SqlQuery
            || operationType == OperationType.ReadFeed)
            && Utils.isCollectionChild(request.getResourceType())) {
            if (request.getPartitionKeyRangeIdentity() == null
                && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) {
                return this.gatewayProxy;
            }
        }
        return this.storeModel;
    }
}

@Override
public void close() {
    logger.info("Attempting to close client {}",
this.clientId);
// Close is idempotent: only the first caller tears the client down.
if (!closed.getAndSet(true)) {
    activeClientsCnt.decrementAndGet();
    logger.info("Shutting down ...");
    logger.info("Closing Global Endpoint Manager ...");
    LifeCycleUtils.closeQuietly(this.globalEndpointManager);
    logger.info("Closing StoreClientFactory ...");
    LifeCycleUtils.closeQuietly(this.storeClientFactory);
    logger.info("Shutting down reactorHttpClient ...");
    LifeCycleUtils.closeQuietly(this.reactorHttpClient);
    logger.info("Shutting down CpuMonitor ...");
    CpuMemoryMonitor.unregister(this);
    if (this.throughputControlEnabled.get()) {
        logger.info("Closing ThroughputControlStore ...");
        this.throughputControlStore.close();
    }
    logger.info("Shutting down completed.");
} else {
    logger.warn("Already shutdown!");
}
}

@Override
public ItemDeserializer getItemDeserializer() {
    return this.itemDeserializer;
}

// Lazily creates the ThroughputControlStore on the first call and wires it into
// the direct store model or the gateway proxy depending on the connection mode.
@Override
public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) {
    checkNotNull(group, "Throughput control group can not be null");
    if (this.throughputControlEnabled.compareAndSet(false, true)) {
        this.throughputControlStore =
            new ThroughputControlStore(
                this.collectionCache,
                this.connectionPolicy.getConnectionMode(),
                this.partitionKeyRangeCache);
        if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) {
            this.storeModel.enableThroughputControl(throughputControlStore);
        } else {
            this.gatewayProxy.enableThroughputControl(throughputControlStore);
        }
    }
    this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono);
}

@Override
public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) {
    return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig);
}

@Override
public ConsistencyLevel getDefaultConsistencyLevelOfAccount() {
    return this.gatewayConfigurationReader.getDefaultConsistencyLevel();
}

/***
 * Configure fault injector provider.
 *
 * @param injectorProvider the fault injector provider.
 */
@Override
public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) {
    checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null");
    if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) {
        this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs);
        this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs);
    }
    // The gateway proxy is configured in both connection modes.
    this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs);
}

@Override
public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities);
}

@Override
public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) {
    this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities);
}

@Override
public String getMasterKeyOrResourceToken() {
    return this.masterKeyOrResourceToken;
}

// Builds a parameterized "SELECT * FROM c WHERE c<selector> = @pkValue" query used
// to scan a single logical partition.
private static SqlQuerySpec createLogicalPartitionScanQuerySpec(
    PartitionKey partitionKey,
    String partitionKeySelector) {
    StringBuilder queryStringBuilder = new StringBuilder();
    List<SqlParameter> parameters = new ArrayList<>();
    queryStringBuilder.append("SELECT * FROM c WHERE");
    Object pkValue = ModelBridgeInternal.getPartitionKeyObject(partitionKey);
    String pkParamName = "@pkValue";
    parameters.add(new SqlParameter(pkParamName, pkValue));
    queryStringBuilder.append(" c");
    // partitionKeySelector is appended verbatim after "c" — assumed to be a
    // pre-rendered property path; confirm with callers.
    queryStringBuilder.append(partitionKeySelector);
    queryStringBuilder.append((" = "));
    queryStringBuilder.append(pkParamName);
    return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
}

@Override
public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) {
    InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
        this.collectionCache,
        null,
        collectionLink,
        new HashMap<>());
    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        this,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null);
    invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> getFeedRangesInternal(request, collectionLink, forceRefresh),
        invalidPartitionExceptionRetryPolicy);
}

private Mono<List<FeedRange>> getFeedRangesInternal(
    RxDocumentServiceRequest request,
    String collectionLink,
    boolean forceRefresh) {
    logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh);
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request);
    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        final DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }
        // Resolve all overlapping partition key ranges for the full key space.
        Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache
            .tryGetOverlappingRangesAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                collection.getResourceId(),
                RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES,
                forceRefresh,
                null);
        return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request));
    });
}

// Converts cached partition key ranges into feed ranges; a null list indicates a
// stale cache, so a name-cache refresh is forced and InvalidPartitionException is
// thrown (picked up by the wrapping retry policy).
private static List<FeedRange> toFeedRanges(
    Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder,
    RxDocumentServiceRequest request) {
    final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v;
    if (partitionKeyRangeList == null) {
        request.forceNameCacheRefresh = true;
        throw new InvalidPartitionException();
    }
    List<FeedRange> feedRanges = new ArrayList<>();
    partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange)));
return feedRanges;
}

// Wraps a partition key range as an EPK (effective partition key) feed range.
private static FeedRange toFeedRange(PartitionKeyRange pkRange) {
    return new FeedRangeEpkImpl(pkRange.toRange());
}

/**
 * Creates a type 4 (pseudo randomly generated) UUID.
 * <p>
 * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator.
 *
 * @return A randomly generated {@link UUID}.
 */
public static UUID randomUuid() {
    return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong());
}

// Turns two arbitrary longs into a valid version-4 UUID by forcing the version
// and variant fields (RFC 4122).
static UUID randomUuid(long msb, long lsb) {
    // Clear the version nibble, then set version = 4 (randomly generated).
    msb &= 0xffffffffffff0fffL;
    msb |= 0x0000000000004000L;
    // Clear the two variant bits, then set the IETF variant (binary 10xx...).
    lsb &= 0x3fffffffffffffffL;
    lsb |= 0x8000000000000000L;
    return new UUID(msb, lsb);
}

// Convenience overload: uses this client as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled) {
    return wrapPointOperationWithAvailabilityStrategy(
        resourceType,
        operationType,
        callback,
        initialRequestOptions,
        idempotentWriteRetriesEnabled,
        this
    );
}

// Executes a document point operation, hedging it across multiple regions when a
// threshold-based availability strategy applies.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy(
    ResourceType resourceType,
    OperationType operationType,
    DocumentPointOperation callback,
    RequestOptions initialRequestOptions,
    boolean idempotentWriteRetriesEnabled,
    DiagnosticsClientContext innerDiagnosticsFactory) {
    checkNotNull(resourceType, "Argument 'resourceType' must not be null.");
    checkNotNull(operationType, "Argument 'operationType' must not be null.");
    checkNotNull(callback, "Argument 'callback' must not be null.");
    final RequestOptions nonNullRequestOptions = initialRequestOptions != null ?
initialRequestOptions : new RequestOptions();
checkArgument(
    resourceType == ResourceType.Document,
    "This method can only be used for document point operations.");
CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions);
List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
    endToEndPolicyConfig,
    resourceType,
    operationType,
    idempotentWriteRetriesEnabled,
    nonNullRequestOptions);
// Fewer than two applicable regions -> no hedging; run the operation once.
if (orderedApplicableRegionsForSpeculation.size() < 2) {
    return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
}
ThresholdBasedAvailabilityStrategy availabilityStrategy =
    (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();
final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);
orderedApplicableRegionsForSpeculation
    .forEach(region -> {
        RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);
        if (monoList.isEmpty()) {
            // The first mono is not pinned to a region; any CosmosException is
            // converted into a value so hedged monos can compete via firstWithValue.
            Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                    .map(NonTransientPointOperationResult::new)
                    .onErrorResume(
                        RxDocumentClientImpl::isCosmosException,
                        t -> Mono.just(
                            new NonTransientPointOperationResult(
                                Utils.as(Exceptions.unwrap(t), CosmosException.class))));
            if (logger.isDebugEnabled()) {
                monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                    "STARTING to process {} operation in region '{}'", operationType, region)));
            } else {
                monoList.add(initialMonoAcrossAllRegions);
            }
        } else {
            // Pin this hedged attempt to a single region by excluding all others.
            clonedOptions.setExcludeRegions(
                getEffectiveExcludedRegionsForHedging(
                    nonNullRequestOptions.getExcludeRegions(),
                    orderedApplicableRegionsForSpeculation,
                    region)
            );
            // Hedged monos only surface non-transient errors as values; transient
            // failures keep the race open for the other regions.
            Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                callback.apply(clonedOptions,
                        endToEndPolicyConfig, diagnosticsFactory)
                    .map(NonTransientPointOperationResult::new)
                    .onErrorResume(
                        RxDocumentClientImpl::isNonTransientCosmosException,
                        t -> Mono.just(
                            new NonTransientPointOperationResult(
                                Utils.as(Exceptions.unwrap(t), CosmosException.class))));
            // The n-th hedged region starts after threshold + (n-1) * thresholdStep.
            Duration delayForCrossRegionalRetry = (availabilityStrategy)
                .getThreshold()
                .plus((availabilityStrategy)
                    .getThresholdStep()
                    .multipliedBy(monoList.size() - 1));
            if (logger.isDebugEnabled()) {
                monoList.add(
                    regionalCrossRegionRetryMono
                        .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                        .delaySubscription(delayForCrossRegionalRetry));
            } else {
                monoList.add(
                    regionalCrossRegionRetryMono
                        .delaySubscription(delayForCrossRegionalRetry));
            }
        }
    });
return Mono
    .firstWithValue(monoList)
    .flatMap(nonTransientResult -> {
        diagnosticsFactory.merge(nonNullRequestOptions);
        if (nonTransientResult.isError()) {
            return Mono.error(nonTransientResult.exception);
        }
        return Mono.just(nonTransientResult.response);
    })
    .onErrorMap(throwable -> {
        Throwable exception = Exceptions.unwrap(throwable);
        // firstWithValue signals NoSuchElementException when no mono produced a
        // value; surface the first inner CosmosException instead.
        if (exception instanceof NoSuchElementException) {
            List<Throwable> innerThrowables = Exceptions
                .unwrapMultiple(exception.getCause());
            int index = 0;
            for (Throwable innerThrowable : innerThrowables) {
                Throwable innerException = Exceptions.unwrap(innerThrowable);
                if (innerException instanceof CosmosException) {
                    CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                    diagnosticsFactory.merge(nonNullRequestOptions);
                    return cosmosException;
                } else if (innerException instanceof NoSuchElementException) {
                    logger.trace(
                        "Operation in {} completed with empty result because it was cancelled.",
                        orderedApplicableRegionsForSpeculation.get(index));
                } else if (logger.isWarnEnabled()) {
                    String message = "Unexpected Non-CosmosException when processing operation in '"
                        + orderedApplicableRegionsForSpeculation.get(index)
                        + "'.";
                    logger.warn(
                        message,
                        innerException
                    );
                }
                index++;
            }
        }
        diagnosticsFactory.merge(nonNullRequestOptions);
        return exception;
    })
    .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
}

// True when the unwrapped throwable is a CosmosException.
private static boolean isCosmosException(Throwable t) {
    final Throwable unwrappedException = Exceptions.unwrap(t);
    return unwrappedException instanceof CosmosException;
}

// True when the unwrapped throwable is a CosmosException whose status/sub-status
// marks it as non-transient for hedging purposes.
private static boolean isNonTransientCosmosException(Throwable t) {
    final Throwable unwrappedException = Exceptions.unwrap(t);
    if (!(unwrappedException instanceof CosmosException)) {
        return false;
    }
    CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
    return isNonTransientResultForHedging(
        cosmosException.getStatusCode(),
        cosmosException.getSubStatusCode());
}

// Excludes every other applicable region so one hedged attempt is pinned to currentRegion.
private List<String> getEffectiveExcludedRegionsForHedging(
    List<String> initialExcludedRegions,
    List<String> applicableRegions,
    String currentRegion) {
    List<String> effectiveExcludedRegions = new ArrayList<>();
    if (initialExcludedRegions != null) {
        effectiveExcludedRegions.addAll(initialExcludedRegions);
    }
    for (String applicableRegion: applicableRegions) {
        if (!applicableRegion.equals(currentRegion)) {
            effectiveExcludedRegions.add(applicableRegion);
        }
    }
    return effectiveExcludedRegions;
}

// Status/sub-status combinations that should immediately win the hedging race
// (success, or errors that will not change by retrying in another region).
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
    if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
        return true;
    }
    // 408 caused by the client-side operation timeout is treated as final.
    if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT &&
        subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {
        return true;
    }
    if (statusCode == HttpConstants.StatusCodes.BADREQUEST
        || statusCode == HttpConstants.StatusCodes.CONFLICT
        || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
        || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
        || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
        || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) {
        return true;
    }
    if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode ==
HttpConstants.SubStatusCodes.UNKNOWN) { return true; } return false; } private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) { if (clientContextOverride != null) { return clientContextOverride; } return this; } /** * Returns the applicable endpoints ordered by preference list if any * @param operationType - the operationT * @return the applicable endpoints ordered by preference list if any */ private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) { if (operationType.isReadOnlyOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions)); } else if (operationType.isWriteOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions)); } return EMPTY_ENDPOINT_LIST; } private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) { if (orderedEffectiveEndpointsList == null) { return EMPTY_ENDPOINT_LIST; } int i = 0; while (i < orderedEffectiveEndpointsList.size()) { if (orderedEffectiveEndpointsList.get(i) == null) { orderedEffectiveEndpointsList.remove(i); } else { i++; } } return orderedEffectiveEndpointsList; } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, RequestOptions options) { return getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, isIdempotentWriteRetriesEnabled, options.getExcludeRegions()); } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, List<String> excludedRegions) { if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) { return EMPTY_REGION_LIST; } if 
(resourceType != ResourceType.Document) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) { return EMPTY_REGION_LIST; } if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) { return EMPTY_REGION_LIST; } List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions); HashSet<String> normalizedExcludedRegions = new HashSet<>(); if (excludedRegions != null) { excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT))); } List<String> orderedRegionsForSpeculation = new ArrayList<>(); endpoints.forEach(uri -> { String regionName = this.globalEndpointManager.getRegionName(uri, operationType); if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) { orderedRegionsForSpeculation.add(regionName); } }); return orderedRegionsForSpeculation; } private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( final ResourceType resourceType, final OperationType operationType, final Supplier<DocumentClientRetryPolicy> retryPolicyFactory, final RxDocumentServiceRequest req, final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation ) { checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null."); checkNotNull(req, "Argument 'req' must not be null."); assert(resourceType == ResourceType.Document); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = this.getEffectiveEndToEndOperationLatencyPolicyConfig( req.requestContext.getEndToEndOperationLatencyPolicyConfig()); List<String> initialExcludedRegions = req.requestContext.getExcludeRegions(); List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, false, 
initialExcludedRegions ); if (orderedApplicableRegionsForSpeculation.size() < 2) { return feedOperation.apply(retryPolicyFactory, req); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>(); orderedApplicableRegionsForSpeculation .forEach(region -> { RxDocumentServiceRequest clonedRequest = req.clone(); if (monoList.isEmpty()) { Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedRequest.requestContext.setExcludeRegions( getEffectiveExcludedRegionsForHedging( initialExcludedRegions, orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) .delaySubscription(delayForCrossRegionalRetry)); } else { 
monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { return Utils.as(innerException, CosmosException.class); } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( message, innerException ); } index++; } } return exception; }); } @FunctionalInterface private interface DocumentPointOperation { Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride); } private static class NonTransientPointOperationResult { private final ResourceResponse<Document> response; private final CosmosException exception; public NonTransientPointOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientPointOperationResult(ResourceResponse<Document> response) { checkNotNull(response, "Argument 'response' must not be null."); this.exception = null; this.response = 
response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public ResourceResponse<Document> getResponse() { return this.response; } } private static class NonTransientFeedOperationResult<T> { private final T response; private final CosmosException exception; public NonTransientFeedOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientFeedOperationResult(T response) { checkNotNull(response, "Argument 'response' must not be null."); this.exception = null; this.response = response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public T getResponse() { return this.response; } } private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext { private final AtomicBoolean isMerged = new AtomicBoolean(false); private final DiagnosticsClientContext inner; private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics; private final boolean shouldCaptureAllFeedDiagnostics; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) { checkNotNull(inner, "Argument 'inner' must not be null."); this.inner = inner; this.createdDiagnostics = new ConcurrentLinkedQueue<>(); this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics; } @Override public DiagnosticsClientConfig getConfig() { return inner.getConfig(); } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = inner.createDiagnostics(); createdDiagnostics.add(diagnostics); mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } @Override public String getUserAgent() { return inner.getUserAgent(); 
} @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return this.mostRecentlyCreatedDiagnostics.get(); } public void merge(RequestOptions requestOptions) { CosmosDiagnosticsContext knownCtx = null; if (requestOptions != null) { CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot(); if (ctxSnapshot != null) { knownCtx = requestOptions.getDiagnosticsContextSnapshot(); } } merge(knownCtx); } public void merge(CosmosDiagnosticsContext knownCtx) { if (!isMerged.compareAndSet(false, true)) { return; } CosmosDiagnosticsContext ctx = null; if (knownCtx != null) { ctx = knownCtx; } else { for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() != null) { ctx = diagnostics.getDiagnosticsContext(); break; } } } if (ctx == null) { return; } for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) { if (this.shouldCaptureAllFeedDiagnostics && diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) { AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics); if (isCaptured != null) { isCaptured.set(true); } } ctxAccessor.addDiagnostics(ctx, diagnostics); } } } public void reset() { this.createdDiagnostics.clear(); this.isMerged.set(false); } } }
what is the 2 parameters?
public Accepted<VirtualMachine> beginCreate() { return AcceptedImpl .<VirtualMachine, VirtualMachineInner>newAccepted( logger, this.manager().serviceClient().getHttpPipeline(), this.manager().serviceClient().getDefaultPollInterval(), () -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null) .block(), inner -> new VirtualMachineImpl( inner.name(), inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager), VirtualMachineInner.class, () -> { Flux<Indexable> dependencyTasksAsync = taskGroup().invokeDependencyAsync(taskGroup().newInvocationContext()); dependencyTasksAsync.blockLast(); prepareCreateResourceAsync().block(); }, this::reset, Context.NONE); }
.createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null)
public Accepted<VirtualMachine> beginCreate() { return AcceptedImpl .<VirtualMachine, VirtualMachineInner>newAccepted( logger, this.manager().serviceClient().getHttpPipeline(), this.manager().serviceClient().getDefaultPollInterval(), () -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null) .block(), inner -> new VirtualMachineImpl( inner.name(), inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager), VirtualMachineInner.class, () -> { Flux<Indexable> dependencyTasksAsync = taskGroup().invokeDependencyAsync(taskGroup().newInvocationContext()); dependencyTasksAsync.blockLast(); prepareCreateResourceAsync().block(); }, this::reset, Context.NONE); }
class VirtualMachineImpl extends GroupableResourceImpl<VirtualMachine, VirtualMachineInner, VirtualMachineImpl, ComputeManager> implements VirtualMachine, VirtualMachine.DefinitionManagedOrUnmanaged, VirtualMachine.DefinitionManaged, VirtualMachine.DefinitionUnmanaged, VirtualMachine.Update, VirtualMachine.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate, VirtualMachine.UpdateStages.WithSystemAssignedIdentityBasedAccessOrUpdate { private final ClientLogger logger = new ClientLogger(VirtualMachineImpl.class); private final StorageManager storageManager; private final NetworkManager networkManager; private final AuthorizationManager authorizationManager; private final String vmName; private final IdentifierProvider namer; private String creatableStorageAccountKey; private String creatableAvailabilitySetKey; private String creatablePrimaryNetworkInterfaceKey; private List<String> creatableSecondaryNetworkInterfaceKeys; private StorageAccount existingStorageAccountToAssociate; private AvailabilitySet existingAvailabilitySetToAssociate; private NetworkInterface existingPrimaryNetworkInterfaceToAssociate; private List<NetworkInterface> existingSecondaryNetworkInterfacesToAssociate; private VirtualMachineInstanceView virtualMachineInstanceView; private boolean isMarketplaceLinuxImage; private NetworkInterface.DefinitionStages.WithPrimaryPrivateIP nicDefinitionWithPrivateIp; private NetworkInterface.DefinitionStages.WithPrimaryNetworkSubnet nicDefinitionWithSubnet; private NetworkInterface.DefinitionStages.WithCreate nicDefinitionWithCreate; private VirtualMachineExtensionsImpl virtualMachineExtensions; private boolean isUnmanagedDiskSelected; private List<VirtualMachineUnmanagedDataDisk> unmanagedDataDisks; private final ManagedDataDiskCollection managedDataDisks; private final BootDiagnosticsHandler bootDiagnosticsHandler; private VirtualMachineMsiHandler virtualMachineMsiHandler; private PublicIpAddress.DefinitionStages.WithCreate implicitPipCreatable; 
private String newProximityPlacementGroupName; private ProximityPlacementGroupType newProximityPlacementGroupType; private boolean removeOsProfile; private DeleteOptions primaryNetworkInterfaceDeleteOptions; private final Map<String, DeleteOptions> secondaryNetworkInterfaceDeleteOptions = new HashMap<>(); private VirtualMachineUpdateInner updateParameterSnapshotOnUpdate; private static final SerializerAdapter SERIALIZER_ADAPTER = SerializerFactory.createDefaultManagementSerializerAdapter(); VirtualMachineImpl( String name, VirtualMachineInner innerModel, final ComputeManager computeManager, final StorageManager storageManager, final NetworkManager networkManager, final AuthorizationManager authorizationManager) { super(name, innerModel, computeManager); this.storageManager = storageManager; this.networkManager = networkManager; this.authorizationManager = authorizationManager; this.vmName = name; this.isMarketplaceLinuxImage = false; this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.vmName); this.creatableSecondaryNetworkInterfaceKeys = new ArrayList<>(); this.existingSecondaryNetworkInterfacesToAssociate = new ArrayList<>(); this.virtualMachineExtensions = new VirtualMachineExtensionsImpl(computeManager.serviceClient().getVirtualMachineExtensions(), this); this.managedDataDisks = new ManagedDataDiskCollection(this); initializeDataDisks(); this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this); this.virtualMachineMsiHandler = new VirtualMachineMsiHandler(authorizationManager, this); this.newProximityPlacementGroupName = null; this.newProximityPlacementGroupType = null; } @Override public VirtualMachineImpl update() { updateParameterSnapshotOnUpdate = this.deepCopyInnerToUpdateParameter(); return super.update(); } @Override public Mono<VirtualMachine> refreshAsync() { return super .refreshAsync() .map( virtualMachine -> { reset(virtualMachine.innerModel()); virtualMachineExtensions.refresh(); return virtualMachine; 
}); } @Override protected Mono<VirtualMachineInner> getInnerAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } @Override public void deallocate() { this.deallocateAsync().block(); } @Override public Mono<Void> deallocateAsync() { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name()) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void deallocate(boolean hibernate) { this.deallocateAsync(hibernate).block(); } @Override public Mono<Void> deallocateAsync(boolean hibernate) { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name(), hibernate) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void generalize() { this.generalizeAsync().block(); } @Override public Mono<Void> generalizeAsync() { return this .manager() .serviceClient() .getVirtualMachines() .generalizeAsync(this.resourceGroupName(), this.name()); } @Override public void powerOff() { this.powerOffAsync().block(); } @Override public Mono<Void> powerOffAsync() { return this .manager() .serviceClient() .getVirtualMachines() .powerOffAsync(this.resourceGroupName(), this.name(), null); } @Override public void powerOff(boolean skipShutdown) { this.powerOffAsync(skipShutdown).block(); } @Override public Mono<Void> powerOffAsync(boolean skipShutdown) { return this .manager() .serviceClient() .getVirtualMachines() .powerOffAsync(this.resourceGroupName(), this.name(), skipShutdown); } @Override public void restart() { this.restartAsync().block(); } @Override public Mono<Void> restartAsync() { return this.manager().serviceClient().getVirtualMachines().restartAsync(this.resourceGroupName(), this.name()); } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return 
this.manager().serviceClient().getVirtualMachines().startAsync(this.resourceGroupName(), this.name()); } @Override public void redeploy() { this.redeployAsync().block(); } @Override public Mono<Void> redeployAsync() { return this.manager().serviceClient().getVirtualMachines().redeployAsync(this.resourceGroupName(), this.name()); } @Override public void simulateEviction() { this.simulateEvictionAsync().block(); } @Override public Mono<Void> simulateEvictionAsync() { return this .manager() .serviceClient() .getVirtualMachines() .simulateEvictionAsync(this.resourceGroupName(), this.name()); } @Override public void convertToManaged() { this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisks(this.resourceGroupName(), this.name()); this.refresh(); } @Override public Mono<Void> convertToManagedAsync() { return this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisksAsync(this.resourceGroupName(), this.name()) .flatMap(aVoid -> refreshAsync()) .then(); } @Override public VirtualMachineEncryption diskEncryption() { return new VirtualMachineEncryptionImpl(this); } @Override public PagedIterable<VirtualMachineSize> availableSizes() { return PagedConverter.mapPage(this .manager() .serviceClient() .getVirtualMachines() .listAvailableSizes(this.resourceGroupName(), this.name()), VirtualMachineSizeImpl::new); } @Override public String capture(String containerName, String vhdPrefix, boolean overwriteVhd) { return this.captureAsync(containerName, vhdPrefix, overwriteVhd).block(); } @Override public Mono<String> captureAsync(String containerName, String vhdPrefix, boolean overwriteVhd) { VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters(); parameters.withDestinationContainerName(containerName); parameters.withOverwriteVhds(overwriteVhd); parameters.withVhdPrefix(vhdPrefix); return this .manager() .serviceClient() .getVirtualMachines() .captureAsync(this.resourceGroupName(), this.name(), parameters) .map( 
captureResultInner -> { try { return SerializerUtils.getObjectMapper().writeValueAsString(captureResultInner); } catch (JsonProcessingException ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } }); } @Override public VirtualMachineInstanceView refreshInstanceView() { return refreshInstanceViewAsync().block(); } @Override public Mono<VirtualMachineInstanceView> refreshInstanceViewAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupWithResponseAsync( this.resourceGroupName(), this.name(), InstanceViewTypes.INSTANCE_VIEW) .map( inner -> { virtualMachineInstanceView = new VirtualMachineInstanceViewImpl(inner.getValue().instanceView()); return virtualMachineInstanceView; }) .switchIfEmpty( Mono .defer( () -> { virtualMachineInstanceView = null; return Mono.empty(); })); } @Override public RunCommandResult runPowerShellScript( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runPowerShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runShellScript(List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runCommand(RunCommandInput inputCommand) { 
return this.manager().virtualMachines().runCommand(this.resourceGroupName(), this.name(), inputCommand); } @Override public Mono<RunCommandResult> runCommandAsync(RunCommandInput inputCommand) { return this.manager().virtualMachines().runCommandAsync(this.resourceGroupName(), this.name(), inputCommand); } @Override public VirtualMachineImpl withNewPrimaryNetwork(Creatable<Network> creatable) { this.nicDefinitionWithPrivateIp = this.preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)).withNewPrimaryNetwork(creatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetwork(String addressSpace) { this.nicDefinitionWithPrivateIp = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) .withNewPrimaryNetwork(addressSpace); return this; } @Override public VirtualMachineImpl withExistingPrimaryNetwork(Network network) { this.nicDefinitionWithSubnet = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) .withExistingPrimaryNetwork(network); return this; } @Override public VirtualMachineImpl withSubnet(String name) { this.nicDefinitionWithPrivateIp = this.nicDefinitionWithSubnet.withSubnet(name); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressDynamic() { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressDynamic(); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressStatic(String staticPrivateIPAddress) { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressStatic(staticPrivateIPAddress); return this; } @Override public VirtualMachineImpl withNewPrimaryPublicIPAddress(Creatable<PublicIpAddress> creatable) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(creatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl 
withNewPrimaryPublicIPAddress(String leafDnsLabel) { return withNewPrimaryPublicIPAddress(leafDnsLabel, null); } public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel, DeleteOptions deleteOptions) { PublicIpAddress.DefinitionStages.WithGroup definitionWithGroup = this .networkManager .publicIpAddresses() .define(this.namer.getRandomName("pip", 15)) .withRegion(this.regionName()); PublicIpAddress.DefinitionStages.WithCreate definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } this.implicitPipCreatable = definitionAfterGroup.withLeafDomainLabel(leafDnsLabel); Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(this.implicitPipCreatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withExistingPrimaryPublicIPAddress(PublicIpAddress publicIPAddress) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withExistingPrimaryPublicIPAddress(publicIPAddress); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withoutPrimaryPublicIPAddress() { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate; this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetworkInterface(Creatable<NetworkInterface> creatable) { this.creatablePrimaryNetworkInterfaceKey = this.addDependency(creatable); return this; } public VirtualMachineImpl withNewPrimaryNetworkInterface(String name, String publicDnsNameLabel) { Creatable<NetworkInterface> definitionCreatable = prepareNetworkInterface(name).withNewPrimaryPublicIPAddress(publicDnsNameLabel); return 
withNewPrimaryNetworkInterface(definitionCreatable); } @Override public VirtualMachineImpl withExistingPrimaryNetworkInterface(NetworkInterface networkInterface) { this.existingPrimaryNetworkInterfaceToAssociate = networkInterface; return this; } @Override public VirtualMachineImpl withStoredWindowsImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withStoredLinuxImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); return this; } @Override public VirtualMachineImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) { return withSpecificWindowsImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) { return withSpecificLinuxImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withSpecificWindowsImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); 
this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecificLinuxImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withLatestWindowsImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificWindowsImageVersion(imageReference); } @Override public VirtualMachineImpl withLatestLinuxImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificLinuxImageVersion(imageReference); } @Override public VirtualMachineImpl withGeneralizedWindowsCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); 
this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecializedWindowsCustomImage(String customImageId) { this.withGeneralizedWindowsCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withGeneralizedLinuxCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withSpecializedLinuxCustomImage(String customImageId) { this.withGeneralizedLinuxCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedOSUnmanagedDisk(String osDiskUrl, OperatingSystemTypes osType) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(osDiskUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); 
this.innerModel().storageProfile().osDisk().withVhd(osVhd); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withManagedDisk(null); return this; } @Override public VirtualMachineImpl withSpecializedOSDisk(Disk disk, OperatingSystemTypes osType) { ManagedDiskParameters diskParametersInner = new ManagedDiskParameters(); diskParametersInner.withId(disk.id()); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); this.innerModel().storageProfile().osDisk().withManagedDisk(diskParametersInner); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withVhd(null); return this; } @Override public VirtualMachineImpl withRootUsername(String rootUserName) { this.innerModel().osProfile().withAdminUsername(rootUserName); return this; } @Override public VirtualMachineImpl withAdminUsername(String adminUserName) { this.innerModel().osProfile().withAdminUsername(adminUserName); return this; } @Override public VirtualMachineImpl withSsh(String publicKeyData) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration().ssh() == null) { SshConfiguration sshConfiguration = new SshConfiguration(); sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>()); osProfile.linuxConfiguration().withSsh(sshConfiguration); } SshPublicKey sshPublicKey = new SshPublicKey(); sshPublicKey.withKeyData(publicKeyData); sshPublicKey.withPath("/home/" + osProfile.adminUsername() + "/.ssh/authorized_keys"); osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey); return this; } @Override public VirtualMachineImpl withoutVMAgent() { this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(false); return this; } @Override public VirtualMachineImpl withoutAutoUpdate() { this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false); return this; } @Override public 
VirtualMachineImpl withTimeZone(String timeZone) {
        // Windows-only setting; callers must have selected a Windows configuration first.
        this.innerModel().osProfile().windowsConfiguration().withTimeZone(timeZone);
        return this;
    }

    @Override
    public VirtualMachineImpl withWinRM(WinRMListener listener) {
        // Lazily create the WinRM configuration on first use.
        if (this.innerModel().osProfile().windowsConfiguration().winRM() == null) {
            WinRMConfiguration winRMConfiguration = new WinRMConfiguration();
            this.innerModel().osProfile().windowsConfiguration().withWinRM(winRMConfiguration);
        }
        // FIX: a freshly constructed WinRMConfiguration has a null listeners list,
        // so adding to it directly threw a NullPointerException; initialize it first.
        if (this.innerModel().osProfile().windowsConfiguration().winRM().listeners() == null) {
            this.innerModel().osProfile().windowsConfiguration().winRM()
                .withListeners(new ArrayList<WinRMListener>());
        }
        this.innerModel().osProfile().windowsConfiguration().winRM().listeners().add(listener);
        return this;
    }

    @Override
    public VirtualMachineImpl withRootPassword(String password) {
        this.innerModel().osProfile().withAdminPassword(password);
        return this;
    }

    @Override
    public VirtualMachineImpl withAdminPassword(String password) {
        this.innerModel().osProfile().withAdminPassword(password);
        return this;
    }

    @Override
    public VirtualMachineImpl withCustomData(String base64EncodedCustomData) {
        this.innerModel().osProfile().withCustomData(base64EncodedCustomData);
        return this;
    }

    @Override
    public VirtualMachineImpl withUserData(String base64EncodedUserData) {
        this.innerModel().withUserData(base64EncodedUserData);
        return this;
    }

    @Override
    public VirtualMachineImpl withComputerName(String computerName) {
        this.innerModel().osProfile().withComputerName(computerName);
        return this;
    }

    @Override
    public VirtualMachineImpl withSize(String sizeName) {
        this.innerModel().hardwareProfile().withVmSize(VirtualMachineSizeTypes.fromString(sizeName));
        return this;
    }

    @Override
    public VirtualMachineImpl withSize(VirtualMachineSizeTypes size) {
        this.innerModel().hardwareProfile().withVmSize(size);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskCaching(CachingTypes cachingType) {
        this.innerModel().storageProfile().osDisk().withCaching(cachingType);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskVhdLocation(String containerName, String vhdName) {
        // VHD location only applies to unmanaged (storage-account backed) OS disks.
        if (isManagedDiskEnabled()) {
            return this;
        }
        StorageProfile storageProfile =
this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!this.isOSDiskFromImage(osDisk)) { return this; } if (this.isOsDiskFromCustomImage(storageProfile)) { return this; } if (this.isOSDiskFromPlatformImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(temporaryBlobUrl(containerName, vhdName)); this.innerModel().storageProfile().osDisk().withVhd(osVhd); return this; } if (this.isOSDiskFromStoredImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); try { URL sourceCustomImageUrl = new URL(osDisk.image().uri()); URL destinationVhdUrl = new URL( sourceCustomImageUrl.getProtocol(), sourceCustomImageUrl.getHost(), "/" + containerName + "/" + vhdName); osVhd.withUri(destinationVhdUrl.toString()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new RuntimeException(ex)); } this.innerModel().storageProfile().osDisk().withVhd(osVhd); } return this; } @Override public VirtualMachineImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) { if (this.innerModel().storageProfile().osDisk().managedDisk() == null) { this.innerModel().storageProfile().osDisk().withManagedDisk(new ManagedDiskParameters()); } this.innerModel().storageProfile().osDisk().managedDisk().withStorageAccountType(accountType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultCachingType(CachingTypes cachingType) { this.managedDataDisks.setDefaultCachingType(cachingType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) { this.managedDataDisks.setDefaultStorageAccountType(storageAccountType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultDeleteOptions(DeleteOptions deleteOptions) { this.managedDataDisks.setDefaultDeleteOptions(diskDeleteOptionsFromDeleteOptions(deleteOptions)); return this; } @Override public VirtualMachineImpl withDataDiskDefaultDiskEncryptionSet( String 
diskEncryptionSetId) { this.managedDataDisks.setDefaultEncryptionSet(diskEncryptionSetId); return this; } @Override public VirtualMachineImpl withOSDiskEncryptionSettings(DiskEncryptionSettings settings) { this.innerModel().storageProfile().osDisk().withEncryptionSettings(settings); return this; } @Override public VirtualMachineImpl withOSDiskSizeInGB(int size) { this.innerModel().storageProfile().osDisk().withDiskSizeGB(size); return this; } @Override public VirtualMachineImpl withOSDiskName(String name) { this.innerModel().storageProfile().osDisk().withName(name); return this; } @Override public VirtualMachineImpl withOSDiskDeleteOptions(DeleteOptions deleteOptions) { this.innerModel().storageProfile().osDisk() .withDeleteOption(DiskDeleteOptionTypes.fromString(deleteOptions.toString())); return this; } @Override public VirtualMachineImpl withOSDiskDiskEncryptionSet(String diskEncryptionSetId) { if (this.innerModel().storageProfile().osDisk().managedDisk() == null) { this.innerModel().storageProfile().osDisk() .withManagedDisk(new ManagedDiskParameters()); } if (this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) { this.innerModel().storageProfile().osDisk().managedDisk() .withDiskEncryptionSet(new DiskEncryptionSetParameters()); } this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet().withId(diskEncryptionSetId); return this; } @Override public VirtualMachineImpl withEphemeralOSDisk() { if (this.innerModel().storageProfile().osDisk().diffDiskSettings() == null) { this.innerModel().storageProfile().osDisk().withDiffDiskSettings(new DiffDiskSettings()); } this.innerModel().storageProfile().osDisk().diffDiskSettings().withOption(DiffDiskOptions.LOCAL); withOSDiskCaching(CachingTypes.READ_ONLY); return this; } @Override public UnmanagedDataDiskImpl defineUnmanagedDataDisk(String name) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return 
UnmanagedDataDiskImpl.prepareDataDisk(name, this);
    }

    @Override
    public VirtualMachineImpl withNewUnmanagedDataDisk(Integer sizeInGB) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        // Delegate to the nameless definition flow and immediately attach.
        return defineUnmanagedDataDisk(null).withNewVhd(sizeInGB).attach();
    }

    @Override
    public VirtualMachineImpl withExistingUnmanagedDataDisk(
        String storageAccountName, String containerName, String vhdName) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return defineUnmanagedDataDisk(null).withExistingVhd(storageAccountName, containerName, vhdName).attach();
    }

    @Override
    public VirtualMachineImpl withoutUnmanagedDataDisk(String name) {
        // Remove the first disk matching the name (case-insensitive) from both the
        // wrapper list and the inner payload; the two lists are assumed to be
        // index-aligned — TODO(review): confirm that invariant holds everywhere.
        for (int i = 0; i < this.unmanagedDataDisks.size(); i++) {
            if (this.unmanagedDataDisks.get(i).name().equalsIgnoreCase(name)) {
                this.unmanagedDataDisks.remove(i);
                this.innerModel().storageProfile().dataDisks().remove(i);
                break;
            }
        }
        return this;
    }

    @Override
    public VirtualMachineImpl withoutUnmanagedDataDisk(int lun) {
        // Same parallel-index removal as the by-name overload, matching on LUN.
        for (int i = 0; i < this.unmanagedDataDisks.size(); i++) {
            if (this.unmanagedDataDisks.get(i).lun() == lun) {
                this.unmanagedDataDisks.remove(i);
                this.innerModel().storageProfile().dataDisks().remove(i);
                break;
            }
        }
        return this;
    }

    @Override
    public UnmanagedDataDiskImpl updateUnmanagedDataDisk(String name) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_NO_UNMANAGED_DISK_TO_UPDATE);
        // Look the disk up by name (case-insensitive); unknown names are an error.
        for (VirtualMachineUnmanagedDataDisk candidate : this.unmanagedDataDisks) {
            if (candidate.name().equalsIgnoreCase(name)) {
                return (UnmanagedDataDiskImpl) candidate;
            }
        }
        throw logger.logExceptionAsError(new RuntimeException("A data disk with name '" + name + "' not found"));
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
this.managedDataDisks.newDisksToAttach.put(this.addDependency(creatable), new DataDisk().withLun(-1)); return this; } @Override public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .newDisksToAttach .put(this.addDependency(creatable), new DataDisk().withLun(lun).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this.managedDataDisks.implicitDisksToAssociate.add(new DataDisk().withLun(-1).withDiskSizeGB(sizeInGB)); return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .implicitDisksToAssociate .add(new DataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDisk( int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(lun) .withDiskSizeGB(sizeInGB) .withCaching(cachingType) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, VirtualMachineDiskOptions options) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = null; if 
(options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) { managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(options.storageAccountType()); if (options.isDiskEncryptionSetConfigured()) { managedDiskParameters.withDiskEncryptionSet( new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId())); } } this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(lun) .withDiskSizeGB(sizeInGB) .withCaching(options.cachingTypes()) .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions())) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add(new DataDisk().withLun(-1).withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add(new DataDisk().withLun(lun).withManagedDisk(managedDiskParameters).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk, int newSizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add( new DataDisk() .withLun(lun) 
.withDiskSizeGB(newSizeInGB) .withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withExistingDataDisk( Disk disk, int newSizeInGB, int lun, VirtualMachineDiskOptions options) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); if (options.isDiskEncryptionSetConfigured()) { managedDiskParameters.withDiskEncryptionSet( new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId())); } this .managedDataDisks .existingDisksToAttach .add( new DataDisk() .withLun(lun) .withDiskSizeGB(newSizeInGB) .withCaching(options.cachingTypes()) .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions())) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage(int imageLun) { this.managedDataDisks.newDisksFromImage.add(new DataDisk().withLun(imageLun)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage(int imageLun, int newSizeInGB, CachingTypes cachingType) { this .managedDataDisks .newDisksFromImage .add(new DataDisk().withLun(imageLun).withDiskSizeGB(newSizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) { ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .newDisksFromImage .add( new DataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, VirtualMachineDiskOptions options) { 
throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = null; if (options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) { managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(options.storageAccountType()); if (options.isDiskEncryptionSetConfigured()) { managedDiskParameters.withDiskEncryptionSet( new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId())); } } this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withCaching(options.cachingTypes()) .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions())) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withoutDataDisk(int lun) { if (!isManagedDiskEnabled()) { return this; } this.managedDataDisks.diskLunsToRemove.add(lun); return this; } @Override public VirtualMachineImpl withNewStorageAccount(Creatable<StorageAccount> creatable) { if (this.creatableStorageAccountKey == null) { this.creatableStorageAccountKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withNewStorageAccount(String name) { StorageAccount.DefinitionStages.WithGroup definitionWithGroup = this.storageManager.storageAccounts().define(name).withRegion(this.regionName()); Creatable<StorageAccount> definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return withNewStorageAccount(definitionAfterGroup); } @Override public VirtualMachineImpl withExistingStorageAccount(StorageAccount storageAccount) { this.existingStorageAccountToAssociate = storageAccount; return this; } @Override public VirtualMachineImpl 
withNewAvailabilitySet(Creatable<AvailabilitySet> creatable) { if (this.creatableAvailabilitySetKey == null) { this.creatableAvailabilitySetKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withProximityPlacementGroup(String proximityPlacementGroupId) { this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId)); newProximityPlacementGroupName = null; return this; } @Override public VirtualMachineImpl withNewProximityPlacementGroup( String proximityPlacementGroupName, ProximityPlacementGroupType type) { this.newProximityPlacementGroupName = proximityPlacementGroupName; this.newProximityPlacementGroupType = type; this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withoutProximityPlacementGroup() { this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withNewAvailabilitySet(String name) { AvailabilitySet.DefinitionStages.WithGroup definitionWithGroup = super.myManager.availabilitySets().define(name).withRegion(this.regionName()); AvailabilitySet.DefinitionStages.WithSku definitionWithSku; if (this.creatableGroup != null) { definitionWithSku = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithSku = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } Creatable<AvailabilitySet> creatable; if (isManagedDiskEnabled()) { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.ALIGNED); } else { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.CLASSIC); } return withNewAvailabilitySet(creatable); } @Override public VirtualMachineImpl withExistingAvailabilitySet(AvailabilitySet availabilitySet) { this.existingAvailabilitySetToAssociate = availabilitySet; return this; } @Override public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable) { 
this.creatableSecondaryNetworkInterfaceKeys.add(this.addDependency(creatable)); return this; } @Override public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable, DeleteOptions deleteOptions) { String key = this.addDependency(creatable); this.creatableSecondaryNetworkInterfaceKeys.add(key); if (deleteOptions != null) { this.secondaryNetworkInterfaceDeleteOptions.put(key, deleteOptions); } return this; } @Override public VirtualMachineImpl withExistingSecondaryNetworkInterface(NetworkInterface networkInterface) { this.existingSecondaryNetworkInterfacesToAssociate.add(networkInterface); return this; } @Override public VirtualMachineExtensionImpl defineNewExtension(String name) { return this.virtualMachineExtensions.define(name); } @Override public VirtualMachineImpl withoutSecondaryNetworkInterface(String name) { if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { int idx = -1; for (NetworkInterfaceReference nicReference : this.innerModel().networkProfile().networkInterfaces()) { idx++; if (!nicReference.primary() && name.equalsIgnoreCase(ResourceUtils.nameFromResourceId(nicReference.id()))) { this.innerModel().networkProfile().networkInterfaces().remove(idx); break; } } } return this; } @Override public VirtualMachineExtensionImpl updateExtension(String name) { return this.virtualMachineExtensions.update(name); } @Override public VirtualMachineImpl withoutExtension(String name) { this.virtualMachineExtensions.remove(name); return this; } @Override public VirtualMachineImpl withPlan(PurchasePlan plan) { this.innerModel().withPlan(new Plan()); this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name()); return this; } @Override public VirtualMachineImpl withPromotionalPlan(PurchasePlan plan, String promotionCode) { this.withPlan(plan); this.innerModel().plan().withPromotionCode(promotionCode); return this; } @Override 
public VirtualMachineImpl withUnmanagedDisks() { this.isUnmanagedDiskSelected = true; return this; } @Override public VirtualMachineImpl withBootDiagnosticsOnManagedStorageAccount() { this.bootDiagnosticsHandler.withBootDiagnostics(true); return this; } @Override public VirtualMachineImpl withBootDiagnostics() { this.bootDiagnosticsHandler.withBootDiagnostics(false); return this; } @Override public VirtualMachineImpl withBootDiagnostics(Creatable<StorageAccount> creatable) { this.bootDiagnosticsHandler.withBootDiagnostics(creatable); return this; } @Override public VirtualMachineImpl withBootDiagnostics(String storageAccountBlobEndpointUri) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri); return this; } @Override public VirtualMachineImpl withBootDiagnostics(StorageAccount storageAccount) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount); return this; } @Override public VirtualMachineImpl withoutBootDiagnostics() { this.bootDiagnosticsHandler.withoutBootDiagnostics(); return this; } @Override public VirtualMachineImpl withPriority(VirtualMachinePriorityTypes priority) { this.innerModel().withPriority(priority); return this; } @Override public VirtualMachineImpl withLowPriority() { this.withPriority(VirtualMachinePriorityTypes.LOW); return this; } @Override public VirtualMachineImpl withLowPriority(VirtualMachineEvictionPolicyTypes policy) { this.withLowPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withSpotPriority() { this.withPriority(VirtualMachinePriorityTypes.SPOT); return this; } @Override public VirtualMachineImpl withSpotPriority(VirtualMachineEvictionPolicyTypes policy) { this.withSpotPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withMaxPrice(Double maxPrice) { this.innerModel().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice)); return this; } @Override public 
VirtualMachineImpl withSystemAssignedManagedServiceIdentity() { this.virtualMachineMsiHandler.withLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withoutSystemAssignedManagedServiceIdentity() { this.virtualMachineMsiHandler.withoutLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) { this.virtualMachineMsiHandler.withAccessTo(resourceId, role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole role) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessTo(resourceId, roleDefinitionId); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId); return this; } @Override public VirtualMachineImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) { this.virtualMachineMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity); return this; } @Override public VirtualMachineImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) { this.virtualMachineMsiHandler.withExistingExternalManagedServiceIdentity(identity); return this; } @Override public VirtualMachineImpl withoutUserAssignedManagedServiceIdentity(String identityId) { this.virtualMachineMsiHandler.withoutExternalManagedServiceIdentity(identityId); return this; } @Override public VirtualMachineImpl withLicenseType(String licenseType) { innerModel().withLicenseType(licenseType); return this; } @Override public VirtualMachineImpl enableHibernation() { if (this.innerModel().additionalCapabilities() == 
null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); } this.innerModel().additionalCapabilities().withHibernationEnabled(true); return this; } @Override public VirtualMachineImpl disableHibernation() { if (this.innerModel().additionalCapabilities() == null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); } this.innerModel().additionalCapabilities().withHibernationEnabled(false); return this; } @Override public boolean isManagedDiskEnabled() { if (isOsDiskFromCustomImage(this.innerModel().storageProfile())) { return true; } if (isOSDiskAttachedManaged(this.innerModel().storageProfile().osDisk())) { return true; } if (isOSDiskFromStoredImage(this.innerModel().storageProfile())) { return false; } if (isOSDiskAttachedUnmanaged(this.innerModel().storageProfile().osDisk())) { return false; } if (isOSDiskFromPlatformImage(this.innerModel().storageProfile())) { if (this.isUnmanagedDiskSelected) { return false; } } if (isInCreateMode()) { return true; } else { return this.innerModel().storageProfile().osDisk().vhd() == null; } } @Override public String computerName() { if (innerModel().osProfile() == null) { return null; } return innerModel().osProfile().computerName(); } @Override public VirtualMachineSizeTypes size() { return innerModel().hardwareProfile().vmSize(); } @Override public OperatingSystemTypes osType() { if (innerModel().storageProfile().osDisk().osType() != null) { return innerModel().storageProfile().osDisk().osType(); } if (innerModel().osProfile() != null) { if (innerModel().osProfile().linuxConfiguration() != null) { return OperatingSystemTypes.LINUX; } if (innerModel().osProfile().windowsConfiguration() != null) { return OperatingSystemTypes.WINDOWS; } } return null; } @Override public String osUnmanagedDiskVhdUri() { if (isManagedDiskEnabled() || this.storageProfile().osDisk().vhd() == null) { return null; } return innerModel().storageProfile().osDisk().vhd().uri(); } @Override public 
CachingTypes osDiskCachingType() {
        return innerModel().storageProfile().osDisk().caching();
    }

    @Override
    public int osDiskSize() {
        return ResourceManagerUtils.toPrimitiveInt(innerModel().storageProfile().osDisk().diskSizeGB());
    }

    @Override
    public StorageAccountTypes osDiskStorageAccountType() {
        if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) {
            return null;
        }
        return this.storageProfile().osDisk().managedDisk().storageAccountType();
    }

    @Override
    public String osDiskId() {
        // FIX: guard against a null managedDisk as the sibling accessors do — during
        // create, a managed VM may not have ManagedDiskParameters populated yet.
        if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) {
            return null;
        }
        return this.storageProfile().osDisk().managedDisk().id();
    }

    @Override
    public DeleteOptions osDiskDeleteOptions() {
        if (!isManagedDiskEnabled() || this.storageProfile().osDisk().deleteOption() == null) {
            return null;
        }
        return DeleteOptions.fromString(this.storageProfile().osDisk().deleteOption().toString());
    }

    @Override
    public String osDiskDiskEncryptionSetId() {
        if (!isManagedDiskEnabled()
            || this.storageProfile().osDisk().managedDisk() == null
            || this.storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) {
            return null;
        }
        return this.storageProfile().osDisk().managedDisk().diskEncryptionSet().id();
    }

    @Override
    public boolean isOSDiskEphemeral() {
        // NOTE(review): keys off the service-populated 'placement'; a locally defined
        // ephemeral disk (diffDiskSettings.option == LOCAL, placement not yet set)
        // reads false until refreshed — confirm this is intended.
        return this.storageProfile().osDisk().diffDiskSettings() != null
            && this.storageProfile().osDisk().diffDiskSettings().placement() != null;
    }

    @Override
    public Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks() {
        Map<Integer, VirtualMachineUnmanagedDataDisk> dataDisks = new HashMap<>();
        // Unmanaged data disks are only meaningful when the VM is not managed-disk based.
        if (!isManagedDiskEnabled()) {
            for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
                dataDisks.put(dataDisk.lun(), dataDisk);
            }
        }
        return Collections.unmodifiableMap(dataDisks);
    }

    @Override
    public Map<Integer, VirtualMachineDataDisk> dataDisks() {
        Map<Integer, VirtualMachineDataDisk> dataDisks = new HashMap<>();
        if (isManagedDiskEnabled()) {
            List<DataDisk> innerDataDisks = this.innerModel().storageProfile().dataDisks();
            if (innerDataDisks != null)
{ for (DataDisk innerDataDisk : innerDataDisks) { dataDisks.put(innerDataDisk.lun(), new VirtualMachineDataDiskImpl(innerDataDisk)); } } } return Collections.unmodifiableMap(dataDisks); } @Override public NetworkInterface getPrimaryNetworkInterface() { return this.getPrimaryNetworkInterfaceAsync().block(); } @Override public Mono<NetworkInterface> getPrimaryNetworkInterfaceAsync() { return this.networkManager.networkInterfaces().getByIdAsync(primaryNetworkInterfaceId()); } @Override public PublicIpAddress getPrimaryPublicIPAddress() { return this.getPrimaryNetworkInterface().primaryIPConfiguration().getPublicIpAddress(); } @Override public String getPrimaryPublicIPAddressId() { return this.getPrimaryNetworkInterface().primaryIPConfiguration().publicIpAddressId(); } @Override public List<String> networkInterfaceIds() { List<String> nicIds = new ArrayList<>(); for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) { nicIds.add(nicRef.id()); } return nicIds; } @Override public String primaryNetworkInterfaceId() { final List<NetworkInterfaceReference> nicRefs = this.innerModel().networkProfile().networkInterfaces(); String primaryNicRefId = null; if (nicRefs.size() == 1) { primaryNicRefId = nicRefs.get(0).id(); } else if (nicRefs.size() == 0) { primaryNicRefId = null; } else { for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) { if (nicRef.primary() != null && nicRef.primary()) { primaryNicRefId = nicRef.id(); break; } } if (primaryNicRefId == null) { primaryNicRefId = nicRefs.get(0).id(); } } return primaryNicRefId; } @Override public String availabilitySetId() { if (innerModel().availabilitySet() != null) { return innerModel().availabilitySet().id(); } return null; } @Override public String virtualMachineScaleSetId() { if (innerModel().virtualMachineScaleSet() != null) { return innerModel().virtualMachineScaleSet().id(); } return null; } @Override public String provisioningState() { return 
innerModel().provisioningState(); } @Override public String licenseType() { return innerModel().licenseType(); } @Override public ProximityPlacementGroup proximityPlacementGroup() { if (innerModel().proximityPlacementGroup() == null) { return null; } else { ResourceId id = ResourceId.fromString(innerModel().proximityPlacementGroup().id()); ProximityPlacementGroupInner plgInner = manager() .serviceClient() .getProximityPlacementGroups() .getByResourceGroup(id.resourceGroupName(), id.name()); if (plgInner == null) { return null; } else { return new ProximityPlacementGroupImpl(plgInner); } } } @Override public Mono<List<VirtualMachineExtension>> listExtensionsAsync() { return this.virtualMachineExtensions.listAsync(); } @Override public Map<String, VirtualMachineExtension> listExtensions() { return this.virtualMachineExtensions.asMap(); } @Override public Plan plan() { return innerModel().plan(); } @Override public StorageProfile storageProfile() { return innerModel().storageProfile(); } @Override public OSProfile osProfile() { return innerModel().osProfile(); } @Override public DiagnosticsProfile diagnosticsProfile() { return innerModel().diagnosticsProfile(); } @Override public String vmId() { return innerModel().vmId(); } @Override public VirtualMachineInstanceView instanceView() { if (this.virtualMachineInstanceView == null) { this.refreshInstanceView(); } return this.virtualMachineInstanceView; } @Override public Set<AvailabilityZoneId> availabilityZones() { Set<AvailabilityZoneId> zones = new HashSet<>(); if (this.innerModel().zones() != null) { for (String zone : this.innerModel().zones()) { zones.add(AvailabilityZoneId.fromString(zone)); } } return Collections.unmodifiableSet(zones); } @Override public PowerState powerState() { return PowerState.fromInstanceView(this.instanceView()); } @Override public boolean isBootDiagnosticsEnabled() { return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled(); } @Override public String bootDiagnosticsStorageUri() { 
return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri(); } @Override public boolean isManagedServiceIdentityEnabled() { ResourceIdentityType type = this.managedServiceIdentityType(); return type != null && !type.equals(ResourceIdentityType.NONE); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().tenantId(); } return null; } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().principalId(); } return null; } @Override public ResourceIdentityType managedServiceIdentityType() { if (this.innerModel().identity() != null) { return this.innerModel().identity().type(); } return null; } @Override public Set<String> userAssignedManagedServiceIdentityIds() { if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) { return Collections .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet())); } return Collections.unmodifiableSet(new HashSet<String>()); } @Override public BillingProfile billingProfile() { return this.innerModel().billingProfile(); } @Override public boolean isHibernationEnabled() { return this.innerModel().additionalCapabilities() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().additionalCapabilities().hibernationEnabled()); } @Override public SecurityTypes securityType() { SecurityProfile securityProfile = this.innerModel().securityProfile(); if (securityProfile == null) { return null; } return securityProfile.securityType(); } @Override public boolean isSecureBootEnabled() { return securityType() != null && this.innerModel().securityProfile().uefiSettings() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().secureBootEnabled()); } @Override public boolean isVTpmEnabled() { return 
securityType() != null && this.innerModel().securityProfile().uefiSettings() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().vTpmEnabled()); } @Override public OffsetDateTime timeCreated() { return innerModel().timeCreated(); } @Override public DeleteOptions primaryNetworkInterfaceDeleteOptions() { String nicId = primaryNetworkInterfaceId(); return networkInterfaceDeleteOptions(nicId); } @Override public DeleteOptions networkInterfaceDeleteOptions(String networkInterfaceId) { if (CoreUtils.isNullOrEmpty(networkInterfaceId) || this.innerModel().networkProfile() == null || this.innerModel().networkProfile().networkInterfaces() == null) { return null; } return this.innerModel().networkProfile() .networkInterfaces() .stream() .filter(nic -> networkInterfaceId.equalsIgnoreCase(nic.id())) .findAny() .map(NetworkInterfaceReference::deleteOption) .orElse(null); } @Override public VirtualMachinePriorityTypes priority() { return this.innerModel().priority(); } @Override public VirtualMachineEvictionPolicyTypes evictionPolicy() { return this.innerModel().evictionPolicy(); } @Override public String userData() { return this.innerModel().userData(); } @Override public void beforeGroupCreateOrUpdate() { if (creatableStorageAccountKey == null && existingStorageAccountToAssociate == null) { if (osDiskRequiresImplicitStorageAccountCreation() || dataDisksRequiresImplicitStorageAccountCreation()) { Creatable<StorageAccount> storageAccountCreatable = null; if (this.creatableGroup != null) { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withNewResourceGroup(this.creatableGroup); } else { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withExistingResourceGroup(this.resourceGroupName()); } 
this.creatableStorageAccountKey = this.addDependency(storageAccountCreatable); } } this.bootDiagnosticsHandler.prepare(); } @Override public Mono<VirtualMachine> createResourceAsync() { return prepareCreateResourceAsync() .flatMap( virtualMachine -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateAsync(resourceGroupName(), vmName, innerModel()) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; })); } private Mono<VirtualMachine> prepareCreateResourceAsync() { setOSDiskDefaults(); setOSProfileDefaults(); setHardwareProfileDefaults(); if (isManagedDiskEnabled()) { managedDataDisks.setDataDisksDefaults(); } else { UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName); } this.handleUnManagedOSAndDataDisksStorageSettings(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.handleNetworkSettings(); return this .createNewProximityPlacementGroupAsync() .map( virtualMachine -> { this.handleAvailabilitySettings(); this.virtualMachineMsiHandler.processCreatedExternalIdentities(); this.virtualMachineMsiHandler.handleExternalIdentities(); return virtualMachine; }); } @Override public Mono<VirtualMachine> updateResourceAsync() { if (isManagedDiskEnabled()) { managedDataDisks.setDataDisksDefaults(); } else { UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName); } this.handleUnManagedOSAndDataDisksStorageSettings(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.handleNetworkSettings(); this.handleAvailabilitySettings(); this.virtualMachineMsiHandler.processCreatedExternalIdentities(); VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner(); this.copyInnerToUpdateParameter(updateParameter); this.virtualMachineMsiHandler.handleExternalIdentities(updateParameter); final boolean vmModified = this.isVirtualMachineModifiedDuringUpdate(updateParameter); if (vmModified) { return this .manager() .serviceClient() .getVirtualMachines() 
.updateAsync(resourceGroupName(), vmName, updateParameter) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; }); } else { return Mono.just(this); } } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { this.virtualMachineExtensions.clear(); if (isGroupFaulted) { return Mono.empty(); } else { return this.refreshAsync().then(); } } VirtualMachineImpl withExtension(VirtualMachineExtensionImpl extension) { this.virtualMachineExtensions.addExtension(extension); return this; } private void reset(VirtualMachineInner inner) { this.setInner(inner); clearCachedRelatedResources(); initializeDataDisks(); virtualMachineMsiHandler.clear(); creatableSecondaryNetworkInterfaceKeys.clear(); existingSecondaryNetworkInterfacesToAssociate.clear(); secondaryNetworkInterfaceDeleteOptions.clear(); primaryNetworkInterfaceDeleteOptions = null; } VirtualMachineImpl withUnmanagedDataDisk(UnmanagedDataDiskImpl dataDisk) { this.innerModel().storageProfile().dataDisks().add(dataDisk.innerModel()); this.unmanagedDataDisks.add(dataDisk); return this; } @Override public VirtualMachineImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (isInCreateMode()) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<String>()); } this.innerModel().zones().add(zoneId.toString()); if (this.implicitPipCreatable != null) { this.implicitPipCreatable .withAvailabilityZone(zoneId) .withSku(PublicIPSkuType.STANDARD) .withStaticIP(); } } return this; } @Override public VirtualMachineImpl withOsDiskDeleteOptions(DeleteOptions deleteOptions) { if (deleteOptions == null || this.innerModel().storageProfile() == null || this.innerModel().storageProfile().osDisk() == null) { return null; } this.innerModel().storageProfile().osDisk().withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); return this; } @Override public VirtualMachineImpl withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions deleteOptions) { 
this.primaryNetworkInterfaceDeleteOptions = deleteOptions; return this; } @Override public VirtualMachineImpl withNetworkInterfacesDeleteOptions(DeleteOptions deleteOptions, String... nicIds) { if (nicIds == null || nicIds.length == 0) { throw new IllegalArgumentException("No nicIds specified for `withNetworkInterfacesDeleteOptions`"); } if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { Set<String> nicIdSet = Arrays.stream(nicIds).map(nicId -> nicId.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()); this.innerModel().networkProfile().networkInterfaces().forEach( nic -> { if (nicIdSet.contains(nic.id().toLowerCase(Locale.ROOT))) { nic.withDeleteOption(deleteOptions); } } ); } return this; } @Override public VirtualMachineImpl withDataDisksDeleteOptions(DeleteOptions deleteOptions, Integer... luns) { if (luns == null || luns.length == 0) { throw new IllegalArgumentException("No luns specified for `withDataDisksDeleteOptions`"); } Set<Integer> lunSet = Arrays.stream(luns).filter(Objects::nonNull).collect(Collectors.toSet()); if (lunSet.isEmpty()) { throw new IllegalArgumentException("No non-null luns specified for `withDataDisksDeleteOptions`"); } if (this.innerModel().storageProfile() != null && this.innerModel().storageProfile().dataDisks() != null) { this.innerModel().storageProfile().dataDisks().forEach( dataDisk -> { if (lunSet.contains(dataDisk.lun())) { dataDisk.withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); } } ); } return this; } AzureEnvironment environment() { return manager().environment(); } private void setOSDiskDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (isOSDiskFromImage(osDisk)) { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() == null) { osDisk.withManagedDisk(new ManagedDiskParameters()); } if (osDisk.managedDisk().storageAccountType() == 
null) { osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS); } osDisk.withVhd(null); } else { if (isOSDiskFromPlatformImage(storageProfile) || isOSDiskFromStoredImage(storageProfile)) { if (osDisk.vhd() == null) { String osDiskVhdContainerName = "vhds"; String osDiskVhdName = this.vmName + "-os-disk-" + UUID.randomUUID().toString() + ".vhd"; withOSDiskVhdLocation(osDiskVhdContainerName, osDiskVhdName); } osDisk.withManagedDisk(null); } if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } else { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() != null) { osDisk.managedDisk().withStorageAccountType(null); } osDisk.withVhd(null); } else { osDisk.withManagedDisk(null); if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } if (osDisk.caching() == null) { withOSDiskCaching(CachingTypes.READ_WRITE); } } private void setOSProfileDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!removeOsProfile && isOSDiskFromImage(osDisk)) { if (osDisk.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration() == null) { osProfile.withLinuxConfiguration(new LinuxConfiguration()); } this .innerModel() .osProfile() .linuxConfiguration() .withDisablePasswordAuthentication(osProfile.adminPassword() == null); } if (this.innerModel().osProfile().computerName() == null) { if (vmName.matches("[0-9]+")) { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } else if (vmName.length() <= 15) { this.innerModel().osProfile().withComputerName(vmName); } else { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } } } else { this.innerModel().withOsProfile(null); } } private void setHardwareProfileDefaults() { if (!isInCreateMode()) { return; } HardwareProfile 
hardwareProfile = this.innerModel().hardwareProfile(); if (hardwareProfile.vmSize() == null) { hardwareProfile.withVmSize(VirtualMachineSizeTypes.BASIC_A0); } } /** Prepare virtual machine disks profile (StorageProfile). */ private void handleUnManagedOSAndDataDisksStorageSettings() { if (isManagedDiskEnabled()) { return; } StorageAccount storageAccount = null; if (this.creatableStorageAccountKey != null) { storageAccount = this.taskResult(this.creatableStorageAccountKey); } else if (this.existingStorageAccountToAssociate != null) { storageAccount = this.existingStorageAccountToAssociate; } if (isInCreateMode()) { if (storageAccount != null) { if (isOSDiskFromPlatformImage(innerModel().storageProfile())) { String uri = innerModel() .storageProfile() .osDisk() .vhd() .uri() .replaceFirst("\\{storage-base-url}", storageAccount.endPoints().primary().blob()); innerModel().storageProfile().osDisk().vhd().withUri(uri); } UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } } else { if (storageAccount != null) { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } else { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, vmName); } } } private Mono<VirtualMachineImpl> createNewProximityPlacementGroupAsync() { if (isInCreateMode()) { if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) { ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner(); plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType); plgInner.withLocation(this.innerModel().location()); return this .manager() .serviceClient() .getProximityPlacementGroups() .createOrUpdateAsync(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner) .map( createdPlgInner -> { this .innerModel() .withProximityPlacementGroup(new SubResource().withId(createdPlgInner.id())); return this; }); } } return Mono.just(this); } private void 
handleNetworkSettings() { if (isInCreateMode()) { NetworkInterface primaryNetworkInterface = null; if (this.creatablePrimaryNetworkInterfaceKey != null) { primaryNetworkInterface = this.taskResult(this.creatablePrimaryNetworkInterfaceKey); } else if (this.existingPrimaryNetworkInterfaceToAssociate != null) { primaryNetworkInterface = this.existingPrimaryNetworkInterfaceToAssociate; } if (primaryNetworkInterface != null) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(true); nicReference.withId(primaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } if (this.primaryNetworkInterfaceDeleteOptions != null) { String primaryNetworkInterfaceId = primaryNetworkInterfaceId(); if (primaryNetworkInterfaceId != null) { this.innerModel().networkProfile().networkInterfaces().stream() .filter(nic -> primaryNetworkInterfaceId.equals(nic.id())) .forEach(nic -> nic.withDeleteOption(this.primaryNetworkInterfaceDeleteOptions)); } } for (String creatableSecondaryNetworkInterfaceKey : this.creatableSecondaryNetworkInterfaceKeys) { NetworkInterface secondaryNetworkInterface = this.taskResult(creatableSecondaryNetworkInterfaceKey); NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); if (secondaryNetworkInterfaceDeleteOptions.containsKey(creatableSecondaryNetworkInterfaceKey)) { DeleteOptions deleteOptions = secondaryNetworkInterfaceDeleteOptions.get(creatableSecondaryNetworkInterfaceKey); nicReference.withDeleteOption(deleteOptions); } this.innerModel().networkProfile().networkInterfaces().add(nicReference); } for (NetworkInterface secondaryNetworkInterface : this.existingSecondaryNetworkInterfacesToAssociate) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); 
this.innerModel().networkProfile().networkInterfaces().add(nicReference);
}
}

// On create only: associates the availability set chosen during definition,
// preferring a creatable (resolved via task result) over an existing one.
private void handleAvailabilitySettings() {
    if (!isInCreateMode()) {
        return;
    }
    AvailabilitySet availabilitySet = null;
    if (this.creatableAvailabilitySetKey != null) {
        availabilitySet = this.taskResult(this.creatableAvailabilitySetKey);
    } else if (this.existingAvailabilitySetToAssociate != null) {
        availabilitySet = this.existingAvailabilitySetToAssociate;
    }
    if (availabilitySet != null) {
        if (this.innerModel().availabilitySet() == null) {
            this.innerModel().withAvailabilitySet(new SubResource());
        }
        this.innerModel().availabilitySet().withId(availabilitySet.id());
    }
}

// True when an unmanaged OS disk based on a platform image needs an implicit
// storage account: create mode only, and no account chosen or pending yet.
private boolean osDiskRequiresImplicitStorageAccountCreation() {
    if (isManagedDiskEnabled()) {
        return false;
    }
    if (this.creatableStorageAccountKey != null
        || this.existingStorageAccountToAssociate != null
        || !isInCreateMode()) {
        return false;
    }
    return isOSDiskFromPlatformImage(this.innerModel().storageProfile());
}

// True when some unmanaged data disk still lacks a VHD target and therefore
// needs an implicit storage account. In update mode, an already-attached disk
// with a VHD implies a usable account exists, so no implicit creation.
private boolean dataDisksRequiresImplicitStorageAccountCreation() {
    if (isManagedDiskEnabled()) {
        return false;
    }
    if (this.creatableStorageAccountKey != null
        || this.existingStorageAccountToAssociate != null
        || this.unmanagedDataDisks.size() == 0) {
        return false;
    }
    boolean hasEmptyVhd = false;
    for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
        if (dataDisk.creationMethod() == DiskCreateOptionTypes.EMPTY
            || dataDisk.creationMethod() == DiskCreateOptionTypes.FROM_IMAGE) {
            if (dataDisk.innerModel().vhd() == null) {
                hasEmptyVhd = true;
                break;
            }
        }
    }
    if (isInCreateMode()) {
        return hasEmptyVhd;
    }
    if (hasEmptyVhd) {
        // Update mode: an existing ATTACH disk with a VHD means we can reuse
        // its storage account instead of creating one.
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            if (dataDisk.creationMethod() == DiskCreateOptionTypes.ATTACH
                && dataDisk.innerModel().vhd() != null) {
                return false;
            }
        }
        return true;
    }
    return false;
}

/**
 * Checks whether the OS disk is directly attached to a unmanaged VHD.
* * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a unmanaged VHD, false otherwise */ private boolean isOSDiskAttachedUnmanaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.vhd() != null && osDisk.vhd().uri() != null; } /** * Checks whether the OS disk is directly attached to a managed disk. * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a managed disk, false otherwise */ private boolean isOSDiskAttachedManaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.managedDisk() != null && osDisk.managedDisk().id() != null; } /** * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]). * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is configured to use image from PIR or custom image */ private boolean isOSDiskFromImage(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE; } /** * Checks whether the OS disk is based on an platform image (image in PIR). * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on platform image. */ private boolean isOSDiskFromPlatformImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.publisher() != null && imageReference.offer() != null && imageReference.sku() != null && imageReference.version() != null; } /** * Checks whether the OS disk is based on a CustomImage. * * <p>A custom image is represented by {@link VirtualMachineCustomImage}. * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on custom image. 
*/ private boolean isOsDiskFromCustomImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null; } /** * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature'). * * <p>A stored image is created by calling {@link VirtualMachine * * @param storageProfile the storage profile * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature') */ private boolean isOSDiskFromStoredImage(StorageProfile storageProfile) { OSDisk osDisk = storageProfile.osDisk(); return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null; } private String temporaryBlobUrl(String containerName, String blobName) { return "{storage-base-url}" + containerName + "/" + blobName; } private NetworkInterface.DefinitionStages.WithPrimaryPublicIPAddress prepareNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionWithNetwork; if (this.creatableGroup != null) { definitionWithNetwork = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithNetwork = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionWithNetwork.withNewPrimaryNetwork("vnet" + name).withPrimaryPrivateIPAddressDynamic(); } private void initializeDataDisks() { if (this.innerModel().storageProfile().dataDisks() == null) { this.innerModel().storageProfile().withDataDisks(new ArrayList<>()); } this.isUnmanagedDiskSelected = false; this.managedDataDisks.clear(); this.unmanagedDataDisks = new ArrayList<>(); if (!isManagedDiskEnabled()) { for (DataDisk dataDiskInner : this.storageProfile().dataDisks()) { 
this.unmanagedDataDisks.add(new UnmanagedDataDiskImpl(dataDiskInner, this)); } } } private NetworkInterface.DefinitionStages.WithPrimaryNetwork preparePrimaryNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionAfterGroup; } private void clearCachedRelatedResources() { this.virtualMachineInstanceView = null; } private void throwIfManagedDiskEnabled(String message) { if (this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private void throwIfManagedDiskDisabled(String message) { if (!this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private boolean isInUpdateMode() { return !this.isInCreateMode(); } private DiskDeleteOptionTypes diskDeleteOptionsFromDeleteOptions(DeleteOptions deleteOptions) { return deleteOptions == null ? 
null : DiskDeleteOptionTypes.fromString(deleteOptions.toString()); } boolean isVirtualMachineModifiedDuringUpdate(VirtualMachineUpdateInner updateParameter) { if (updateParameterSnapshotOnUpdate == null || updateParameter == null) { return true; } else { try { String jsonStrSnapshot = SERIALIZER_ADAPTER.serialize(updateParameterSnapshotOnUpdate, SerializerEncoding.JSON); String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON); return !jsonStr.equals(jsonStrSnapshot); } catch (IOException e) { return true; } } } VirtualMachineUpdateInner deepCopyInnerToUpdateParameter() { VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner(); copyInnerToUpdateParameter(updateParameter); try { String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON); updateParameter = SERIALIZER_ADAPTER.deserialize(jsonStr, VirtualMachineUpdateInner.class, SerializerEncoding.JSON); } catch (IOException e) { return null; } if (this.innerModel().identity() != null) { VirtualMachineIdentity identity = new VirtualMachineIdentity(); identity.withType(this.innerModel().identity().type()); updateParameter.withIdentity(identity); } return updateParameter; } private void copyInnerToUpdateParameter(VirtualMachineUpdateInner updateParameter) { updateParameter.withHardwareProfile(this.innerModel().hardwareProfile()); updateParameter.withStorageProfile(this.innerModel().storageProfile()); updateParameter.withOsProfile(this.innerModel().osProfile()); updateParameter.withNetworkProfile(this.innerModel().networkProfile()); updateParameter.withDiagnosticsProfile(this.innerModel().diagnosticsProfile()); updateParameter.withBillingProfile(this.innerModel().billingProfile()); updateParameter.withSecurityProfile(this.innerModel().securityProfile()); updateParameter.withAdditionalCapabilities(this.innerModel().additionalCapabilities()); updateParameter.withAvailabilitySet(this.innerModel().availabilitySet()); 
updateParameter.withLicenseType(this.innerModel().licenseType()); updateParameter.withZones(this.innerModel().zones()); updateParameter.withTags(this.innerModel().tags()); updateParameter.withProximityPlacementGroup(this.innerModel().proximityPlacementGroup()); updateParameter.withPriority(this.innerModel().priority()); updateParameter.withEvictionPolicy(this.innerModel().evictionPolicy()); updateParameter.withUserData(this.innerModel().userData()); } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } @Override public VirtualMachineImpl withPlacement(DiffDiskPlacement placement) { if (placement != null) { this.innerModel().storageProfile().osDisk().diffDiskSettings().withPlacement(placement); } return this; } @Override public VirtualMachineImpl withExistingVirtualMachineScaleSet(VirtualMachineScaleSet scaleSet) { if (scaleSet != null) { this.innerModel().withVirtualMachineScaleSet(new SubResource().withId(scaleSet.id())); } return this; } @Override public VirtualMachineImpl withOSDisk(String diskId) { if (diskId == null) { return this; } if (!isManagedDiskEnabled() || this.innerModel().storageProfile().osDisk().managedDisk() == null) { return this; } OSDisk osDisk = new OSDisk() .withCreateOption(this.innerModel().storageProfile().osDisk().createOption()); osDisk.withManagedDisk(new ManagedDiskParameters().withId(diskId)); this.storageProfile().withOsDisk(osDisk); this.storageProfile().osDisk().managedDisk().withId(diskId); return this; } @Override public VirtualMachineImpl withOSDisk(Disk disk) { if (disk == null) { return this; } return withOSDisk(disk.id()); } @Override public VirtualMachineImpl withTrustedLaunch() { 
ensureSecurityProfile().withSecurityType(SecurityTypes.TRUSTED_LAUNCH);
return this;
}

@Override
public VirtualMachineImpl withSecureBoot() {
    // Secure boot is only meaningful once a security type is configured; no-op otherwise.
    if (securityType() == null) {
        return this;
    }
    ensureUefiSettings().withSecureBootEnabled(true);
    return this;
}

@Override
public VirtualMachineImpl withoutSecureBoot() {
    if (securityType() == null) {
        return this;
    }
    ensureUefiSettings().withSecureBootEnabled(false);
    return this;
}

@Override
public VirtualMachineImpl withVTpm() {
    // Virtual TPM likewise requires a security type to be configured first; no-op otherwise.
    if (securityType() == null) {
        return this;
    }
    ensureUefiSettings().withVTpmEnabled(true);
    return this;
}

@Override
public VirtualMachineImpl withoutVTpm() {
    if (securityType() == null) {
        return this;
    }
    ensureUefiSettings().withVTpmEnabled(false);
    return this;
}

// Lazily creates and attaches the inner SecurityProfile on first use.
private SecurityProfile ensureSecurityProfile() {
    SecurityProfile securityProfile = this.innerModel().securityProfile();
    if (securityProfile == null) {
        securityProfile = new SecurityProfile();
        this.innerModel().withSecurityProfile(securityProfile);
    }
    return securityProfile;
}

// Lazily creates the UefiSettings under the security profile on first use.
private UefiSettings ensureUefiSettings() {
    UefiSettings uefiSettings = ensureSecurityProfile().uefiSettings();
    if (uefiSettings == null) {
        uefiSettings = new UefiSettings();
        ensureSecurityProfile().withUefiSettings(uefiSettings);
    }
    return uefiSettings;
}

/** Class to manage Data disk collection.
*/ private class ManagedDataDiskCollection { private final Map<String, DataDisk> newDisksToAttach = new HashMap<>(); private final List<DataDisk> existingDisksToAttach = new ArrayList<>(); private final List<DataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<DataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineImpl vm; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; private DiskDeleteOptionTypes defaultDeleteOptions; private DiskEncryptionSetParameters defaultDiskEncryptionSet; ManagedDataDiskCollection(VirtualMachineImpl vm) { this.vm = vm; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultDeleteOptions(DiskDeleteOptionTypes deleteOptions) { this.defaultDeleteOptions = deleteOptions; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDefaultEncryptionSet(String diskEncryptionSetId) { this.defaultDiskEncryptionSet = new DiskEncryptionSetParameters().withId(diskEncryptionSetId); } void setDataDisksDefaults() { VirtualMachineInner vmInner = this.vm.innerModel(); if (isPending()) { if (vmInner.storageProfile().dataDisks() == null) { vmInner.storageProfile().withDataDisks(new ArrayList<>()); } List<DataDisk> dataDisks = vmInner.storageProfile().dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksToAttach.values()) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.existingDisksToAttach) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { 
usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setAttachableNewDataDisks(nextLun); setAttachableExistingDataDisks(nextLun); setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (vmInner.storageProfile().dataDisks() != null && vmInner.storageProfile().dataDisks().size() == 0) { if (vm.isInCreateMode()) { vmInner.storageProfile().withDataDisks(null); } } this.clear(); } private void clear() { newDisksToAttach.clear(); existingDisksToAttach.clear(); implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); defaultCachingType = null; defaultStorageAccountType = null; defaultDeleteOptions = null; defaultDiskEncryptionSet = null; } private boolean isPending() { return newDisksToAttach.size() > 0 || existingDisksToAttach.size() > 0 || implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void setDefaultDiskEncryptionSetOptions(DataDisk dataDisk) { if (getDefaultDiskEncryptionSetOptions() != null) { if (dataDisk.managedDisk() != null && dataDisk.managedDisk().diskEncryptionSet() != null) { if (dataDisk.managedDisk().diskEncryptionSet().id() == null) { dataDisk.managedDisk().withDiskEncryptionSet(null); } } else { if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } dataDisk.managedDisk().withDiskEncryptionSet(getDefaultDiskEncryptionSetOptions()); } } } private void setAttachableNewDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Map.Entry<String, DataDisk> entry : this.newDisksToAttach.entrySet()) { Disk managedDisk = 
vm.taskResult(entry.getKey()); DataDisk dataDisk = entry.getValue(); dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } dataDisk.withManagedDisk(new ManagedDiskParameters()); dataDisk.managedDisk().withId(managedDisk.id()); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setAttachableExistingDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.existingDisksToAttach) { dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); 
dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } private DiskDeleteOptionTypes getDefaultDeleteOptions() { return defaultDeleteOptions; } private DiskEncryptionSetParameters getDefaultDiskEncryptionSetOptions() { return defaultDiskEncryptionSet; } } /** Class to manage VM boot diagnostics settings. 
*/ private class BootDiagnosticsHandler { private final VirtualMachineImpl vmImpl; private String creatableDiagnosticsStorageAccountKey; private boolean useManagedStorageAccount = false; BootDiagnosticsHandler(VirtualMachineImpl vmImpl) { this.vmImpl = vmImpl; if (isBootDiagnosticsEnabled() && this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } } public boolean isBootDiagnosticsEnabled() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null && this.vmInner().diagnosticsProfile().bootDiagnostics().enabled() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().enabled(); } return false; } public String bootDiagnosticsStorageUri() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri(); } return null; } BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; } BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(creatable); return this; } BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { this.enableDisable(true); this.useManagedStorageAccount = false; this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(storageAccountBlobEndpointUri); return this; } BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); } BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; } void prepare() { if 
(useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } if (this.creatableDiagnosticsStorageAccountKey != null || this.vmImpl.creatableStorageAccountKey != null || this.vmImpl.existingStorageAccountToAssociate != null) { return; } String accountName = this.vmImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmImpl.creatableGroup != null) { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withNewResourceGroup(this.vmImpl.creatableGroup); } else { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withExistingResourceGroup(this.vmImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(storageAccountCreatable); } void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.vmImpl.creatableStorageAccountKey != null) { storageAccount = 
this.vmImpl.<StorageAccount>taskResult(this.vmImpl.creatableStorageAccountKey); } else if (this.vmImpl.existingStorageAccountToAssociate != null) { storageAccount = this.vmImpl.existingStorageAccountToAssociate; } if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmInner() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); } private VirtualMachineInner vmInner() { return this.vmImpl.innerModel(); } private void enableDisable(boolean enable) { if (this.vmInner().diagnosticsProfile() == null) { this.vmInner().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmInner().diagnosticsProfile().bootDiagnostics() == null) { this.vmInner().diagnosticsProfile().withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
class VirtualMachineImpl extends GroupableResourceImpl<VirtualMachine, VirtualMachineInner, VirtualMachineImpl, ComputeManager> implements VirtualMachine, VirtualMachine.DefinitionManagedOrUnmanaged, VirtualMachine.DefinitionManaged, VirtualMachine.DefinitionUnmanaged, VirtualMachine.Update, VirtualMachine.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate, VirtualMachine.UpdateStages.WithSystemAssignedIdentityBasedAccessOrUpdate { private final ClientLogger logger = new ClientLogger(VirtualMachineImpl.class); private final StorageManager storageManager; private final NetworkManager networkManager; private final AuthorizationManager authorizationManager; private final String vmName; private final IdentifierProvider namer; private String creatableStorageAccountKey; private String creatableAvailabilitySetKey; private String creatablePrimaryNetworkInterfaceKey; private List<String> creatableSecondaryNetworkInterfaceKeys; private StorageAccount existingStorageAccountToAssociate; private AvailabilitySet existingAvailabilitySetToAssociate; private NetworkInterface existingPrimaryNetworkInterfaceToAssociate; private List<NetworkInterface> existingSecondaryNetworkInterfacesToAssociate; private VirtualMachineInstanceView virtualMachineInstanceView; private boolean isMarketplaceLinuxImage; private NetworkInterface.DefinitionStages.WithPrimaryPrivateIP nicDefinitionWithPrivateIp; private NetworkInterface.DefinitionStages.WithPrimaryNetworkSubnet nicDefinitionWithSubnet; private NetworkInterface.DefinitionStages.WithCreate nicDefinitionWithCreate; private VirtualMachineExtensionsImpl virtualMachineExtensions; private boolean isUnmanagedDiskSelected; private List<VirtualMachineUnmanagedDataDisk> unmanagedDataDisks; private final ManagedDataDiskCollection managedDataDisks; private final BootDiagnosticsHandler bootDiagnosticsHandler; private VirtualMachineMsiHandler virtualMachineMsiHandler; private PublicIpAddress.DefinitionStages.WithCreate implicitPipCreatable; 
private String newProximityPlacementGroupName; private ProximityPlacementGroupType newProximityPlacementGroupType; private boolean removeOsProfile; private DeleteOptions primaryNetworkInterfaceDeleteOptions; private final Map<String, DeleteOptions> secondaryNetworkInterfaceDeleteOptions = new HashMap<>(); private VirtualMachineUpdateInner updateParameterSnapshotOnUpdate; private static final SerializerAdapter SERIALIZER_ADAPTER = SerializerFactory.createDefaultManagementSerializerAdapter(); VirtualMachineImpl( String name, VirtualMachineInner innerModel, final ComputeManager computeManager, final StorageManager storageManager, final NetworkManager networkManager, final AuthorizationManager authorizationManager) { super(name, innerModel, computeManager); this.storageManager = storageManager; this.networkManager = networkManager; this.authorizationManager = authorizationManager; this.vmName = name; this.isMarketplaceLinuxImage = false; this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.vmName); this.creatableSecondaryNetworkInterfaceKeys = new ArrayList<>(); this.existingSecondaryNetworkInterfacesToAssociate = new ArrayList<>(); this.virtualMachineExtensions = new VirtualMachineExtensionsImpl(computeManager.serviceClient().getVirtualMachineExtensions(), this); this.managedDataDisks = new ManagedDataDiskCollection(this); initializeDataDisks(); this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this); this.virtualMachineMsiHandler = new VirtualMachineMsiHandler(authorizationManager, this); this.newProximityPlacementGroupName = null; this.newProximityPlacementGroupType = null; } @Override public VirtualMachineImpl update() { updateParameterSnapshotOnUpdate = this.deepCopyInnerToUpdateParameter(); return super.update(); } @Override public Mono<VirtualMachine> refreshAsync() { return super .refreshAsync() .map( virtualMachine -> { reset(virtualMachine.innerModel()); virtualMachineExtensions.refresh(); return virtualMachine; 
}); } @Override protected Mono<VirtualMachineInner> getInnerAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); } @Override public void deallocate() { this.deallocateAsync().block(); } @Override public Mono<Void> deallocateAsync() { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name()) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void deallocate(boolean hibernate) { this.deallocateAsync(hibernate).block(); } @Override public Mono<Void> deallocateAsync(boolean hibernate) { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name(), hibernate) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void generalize() { this.generalizeAsync().block(); } @Override public Mono<Void> generalizeAsync() { return this .manager() .serviceClient() .getVirtualMachines() .generalizeAsync(this.resourceGroupName(), this.name()); } @Override public void powerOff() { this.powerOffAsync().block(); } @Override public Mono<Void> powerOffAsync() { return this .manager() .serviceClient() .getVirtualMachines() .powerOffAsync(this.resourceGroupName(), this.name(), null); } @Override public void powerOff(boolean skipShutdown) { this.powerOffAsync(skipShutdown).block(); } @Override public Mono<Void> powerOffAsync(boolean skipShutdown) { return this .manager() .serviceClient() .getVirtualMachines() .powerOffAsync(this.resourceGroupName(), this.name(), skipShutdown); } @Override public void restart() { this.restartAsync().block(); } @Override public Mono<Void> restartAsync() { return this.manager().serviceClient().getVirtualMachines().restartAsync(this.resourceGroupName(), this.name()); } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return 
this.manager().serviceClient().getVirtualMachines().startAsync(this.resourceGroupName(), this.name()); } @Override public void redeploy() { this.redeployAsync().block(); } @Override public Mono<Void> redeployAsync() { return this.manager().serviceClient().getVirtualMachines().redeployAsync(this.resourceGroupName(), this.name()); } @Override public void simulateEviction() { this.simulateEvictionAsync().block(); } @Override public Mono<Void> simulateEvictionAsync() { return this .manager() .serviceClient() .getVirtualMachines() .simulateEvictionAsync(this.resourceGroupName(), this.name()); } @Override public void convertToManaged() { this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisks(this.resourceGroupName(), this.name()); this.refresh(); } @Override public Mono<Void> convertToManagedAsync() { return this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisksAsync(this.resourceGroupName(), this.name()) .flatMap(aVoid -> refreshAsync()) .then(); } @Override public VirtualMachineEncryption diskEncryption() { return new VirtualMachineEncryptionImpl(this); } @Override public PagedIterable<VirtualMachineSize> availableSizes() { return PagedConverter.mapPage(this .manager() .serviceClient() .getVirtualMachines() .listAvailableSizes(this.resourceGroupName(), this.name()), VirtualMachineSizeImpl::new); } @Override public String capture(String containerName, String vhdPrefix, boolean overwriteVhd) { return this.captureAsync(containerName, vhdPrefix, overwriteVhd).block(); } @Override public Mono<String> captureAsync(String containerName, String vhdPrefix, boolean overwriteVhd) { VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters(); parameters.withDestinationContainerName(containerName); parameters.withOverwriteVhds(overwriteVhd); parameters.withVhdPrefix(vhdPrefix); return this .manager() .serviceClient() .getVirtualMachines() .captureAsync(this.resourceGroupName(), this.name(), parameters) .map( 
captureResultInner -> { try { return SerializerUtils.getObjectMapper().writeValueAsString(captureResultInner); } catch (JsonProcessingException ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } }); } @Override public VirtualMachineInstanceView refreshInstanceView() { return refreshInstanceViewAsync().block(); } @Override public Mono<VirtualMachineInstanceView> refreshInstanceViewAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupWithResponseAsync( this.resourceGroupName(), this.name(), InstanceViewTypes.INSTANCE_VIEW) .map( inner -> { virtualMachineInstanceView = new VirtualMachineInstanceViewImpl(inner.getValue().instanceView()); return virtualMachineInstanceView; }) .switchIfEmpty( Mono .defer( () -> { virtualMachineInstanceView = null; return Mono.empty(); })); } @Override public RunCommandResult runPowerShellScript( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runPowerShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runShellScript(List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runCommand(RunCommandInput inputCommand) { 
return this.manager().virtualMachines().runCommand(this.resourceGroupName(), this.name(), inputCommand); } @Override public Mono<RunCommandResult> runCommandAsync(RunCommandInput inputCommand) { return this.manager().virtualMachines().runCommandAsync(this.resourceGroupName(), this.name(), inputCommand); } @Override public VirtualMachineImpl withNewPrimaryNetwork(Creatable<Network> creatable) { this.nicDefinitionWithPrivateIp = this.preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)).withNewPrimaryNetwork(creatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetwork(String addressSpace) { this.nicDefinitionWithPrivateIp = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) .withNewPrimaryNetwork(addressSpace); return this; } @Override public VirtualMachineImpl withExistingPrimaryNetwork(Network network) { this.nicDefinitionWithSubnet = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) .withExistingPrimaryNetwork(network); return this; } @Override public VirtualMachineImpl withSubnet(String name) { this.nicDefinitionWithPrivateIp = this.nicDefinitionWithSubnet.withSubnet(name); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressDynamic() { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressDynamic(); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressStatic(String staticPrivateIPAddress) { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressStatic(staticPrivateIPAddress); return this; } @Override public VirtualMachineImpl withNewPrimaryPublicIPAddress(Creatable<PublicIpAddress> creatable) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(creatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl 
withNewPrimaryPublicIPAddress(String leafDnsLabel) { return withNewPrimaryPublicIPAddress(leafDnsLabel, null); } public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel, DeleteOptions deleteOptions) { PublicIpAddress.DefinitionStages.WithGroup definitionWithGroup = this .networkManager .publicIpAddresses() .define(this.namer.getRandomName("pip", 15)) .withRegion(this.regionName()); PublicIpAddress.DefinitionStages.WithCreate definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } this.implicitPipCreatable = definitionAfterGroup.withLeafDomainLabel(leafDnsLabel); Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(this.implicitPipCreatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withExistingPrimaryPublicIPAddress(PublicIpAddress publicIPAddress) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withExistingPrimaryPublicIPAddress(publicIPAddress); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withoutPrimaryPublicIPAddress() { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate; this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetworkInterface(Creatable<NetworkInterface> creatable) { this.creatablePrimaryNetworkInterfaceKey = this.addDependency(creatable); return this; } public VirtualMachineImpl withNewPrimaryNetworkInterface(String name, String publicDnsNameLabel) { Creatable<NetworkInterface> definitionCreatable = prepareNetworkInterface(name).withNewPrimaryPublicIPAddress(publicDnsNameLabel); return 
withNewPrimaryNetworkInterface(definitionCreatable); } @Override public VirtualMachineImpl withExistingPrimaryNetworkInterface(NetworkInterface networkInterface) { this.existingPrimaryNetworkInterfaceToAssociate = networkInterface; return this; } @Override public VirtualMachineImpl withStoredWindowsImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withStoredLinuxImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); return this; } @Override public VirtualMachineImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) { return withSpecificWindowsImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) { return withSpecificLinuxImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withSpecificWindowsImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); 
this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecificLinuxImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withLatestWindowsImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificWindowsImageVersion(imageReference); } @Override public VirtualMachineImpl withLatestLinuxImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificLinuxImageVersion(imageReference); } @Override public VirtualMachineImpl withGeneralizedWindowsCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); 
this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecializedWindowsCustomImage(String customImageId) { this.withGeneralizedWindowsCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withGeneralizedLinuxCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withSpecializedLinuxCustomImage(String customImageId) { this.withGeneralizedLinuxCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedOSUnmanagedDisk(String osDiskUrl, OperatingSystemTypes osType) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(osDiskUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); 
this.innerModel().storageProfile().osDisk().withVhd(osVhd); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withManagedDisk(null); return this; } @Override public VirtualMachineImpl withSpecializedOSDisk(Disk disk, OperatingSystemTypes osType) { ManagedDiskParameters diskParametersInner = new ManagedDiskParameters(); diskParametersInner.withId(disk.id()); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); this.innerModel().storageProfile().osDisk().withManagedDisk(diskParametersInner); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withVhd(null); return this; } @Override public VirtualMachineImpl withRootUsername(String rootUserName) { this.innerModel().osProfile().withAdminUsername(rootUserName); return this; } @Override public VirtualMachineImpl withAdminUsername(String adminUserName) { this.innerModel().osProfile().withAdminUsername(adminUserName); return this; } @Override public VirtualMachineImpl withSsh(String publicKeyData) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration().ssh() == null) { SshConfiguration sshConfiguration = new SshConfiguration(); sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>()); osProfile.linuxConfiguration().withSsh(sshConfiguration); } SshPublicKey sshPublicKey = new SshPublicKey(); sshPublicKey.withKeyData(publicKeyData); sshPublicKey.withPath("/home/" + osProfile.adminUsername() + "/.ssh/authorized_keys"); osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey); return this; } @Override public VirtualMachineImpl withoutVMAgent() { this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(false); return this; } @Override public VirtualMachineImpl withoutAutoUpdate() { this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false); return this; } @Override public 
VirtualMachineImpl withTimeZone(String timeZone) { this.innerModel().osProfile().windowsConfiguration().withTimeZone(timeZone); return this; } @Override public VirtualMachineImpl withWinRM(WinRMListener listener) { if (this.innerModel().osProfile().windowsConfiguration().winRM() == null) { WinRMConfiguration winRMConfiguration = new WinRMConfiguration(); this.innerModel().osProfile().windowsConfiguration().withWinRM(winRMConfiguration); } this.innerModel().osProfile().windowsConfiguration().winRM().listeners().add(listener); return this; } @Override public VirtualMachineImpl withRootPassword(String password) { this.innerModel().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineImpl withAdminPassword(String password) { this.innerModel().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineImpl withCustomData(String base64EncodedCustomData) { this.innerModel().osProfile().withCustomData(base64EncodedCustomData); return this; } @Override public VirtualMachineImpl withUserData(String base64EncodedUserData) { this.innerModel().withUserData(base64EncodedUserData); return this; } @Override public VirtualMachineImpl withComputerName(String computerName) { this.innerModel().osProfile().withComputerName(computerName); return this; } @Override public VirtualMachineImpl withSize(String sizeName) { this.innerModel().hardwareProfile().withVmSize(VirtualMachineSizeTypes.fromString(sizeName)); return this; } @Override public VirtualMachineImpl withSize(VirtualMachineSizeTypes size) { this.innerModel().hardwareProfile().withVmSize(size); return this; } @Override public VirtualMachineImpl withOSDiskCaching(CachingTypes cachingType) { this.innerModel().storageProfile().osDisk().withCaching(cachingType); return this; } @Override public VirtualMachineImpl withOSDiskVhdLocation(String containerName, String vhdName) { if (isManagedDiskEnabled()) { return this; } StorageProfile storageProfile = 
this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!this.isOSDiskFromImage(osDisk)) { return this; } if (this.isOsDiskFromCustomImage(storageProfile)) { return this; } if (this.isOSDiskFromPlatformImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(temporaryBlobUrl(containerName, vhdName)); this.innerModel().storageProfile().osDisk().withVhd(osVhd); return this; } if (this.isOSDiskFromStoredImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); try { URL sourceCustomImageUrl = new URL(osDisk.image().uri()); URL destinationVhdUrl = new URL( sourceCustomImageUrl.getProtocol(), sourceCustomImageUrl.getHost(), "/" + containerName + "/" + vhdName); osVhd.withUri(destinationVhdUrl.toString()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new RuntimeException(ex)); } this.innerModel().storageProfile().osDisk().withVhd(osVhd); } return this; } @Override public VirtualMachineImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) { if (this.innerModel().storageProfile().osDisk().managedDisk() == null) { this.innerModel().storageProfile().osDisk().withManagedDisk(new ManagedDiskParameters()); } this.innerModel().storageProfile().osDisk().managedDisk().withStorageAccountType(accountType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultCachingType(CachingTypes cachingType) { this.managedDataDisks.setDefaultCachingType(cachingType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) { this.managedDataDisks.setDefaultStorageAccountType(storageAccountType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultDeleteOptions(DeleteOptions deleteOptions) { this.managedDataDisks.setDefaultDeleteOptions(diskDeleteOptionsFromDeleteOptions(deleteOptions)); return this; } @Override public VirtualMachineImpl withDataDiskDefaultDiskEncryptionSet( String 
diskEncryptionSetId) { this.managedDataDisks.setDefaultEncryptionSet(diskEncryptionSetId); return this; } @Override public VirtualMachineImpl withOSDiskEncryptionSettings(DiskEncryptionSettings settings) { this.innerModel().storageProfile().osDisk().withEncryptionSettings(settings); return this; } @Override public VirtualMachineImpl withOSDiskSizeInGB(int size) { this.innerModel().storageProfile().osDisk().withDiskSizeGB(size); return this; } @Override public VirtualMachineImpl withOSDiskName(String name) { this.innerModel().storageProfile().osDisk().withName(name); return this; } @Override public VirtualMachineImpl withOSDiskDeleteOptions(DeleteOptions deleteOptions) { this.innerModel().storageProfile().osDisk() .withDeleteOption(DiskDeleteOptionTypes.fromString(deleteOptions.toString())); return this; } @Override public VirtualMachineImpl withOSDiskDiskEncryptionSet(String diskEncryptionSetId) { if (this.innerModel().storageProfile().osDisk().managedDisk() == null) { this.innerModel().storageProfile().osDisk() .withManagedDisk(new ManagedDiskParameters()); } if (this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) { this.innerModel().storageProfile().osDisk().managedDisk() .withDiskEncryptionSet(new DiskEncryptionSetParameters()); } this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet().withId(diskEncryptionSetId); return this; } @Override public VirtualMachineImpl withEphemeralOSDisk() { if (this.innerModel().storageProfile().osDisk().diffDiskSettings() == null) { this.innerModel().storageProfile().osDisk().withDiffDiskSettings(new DiffDiskSettings()); } this.innerModel().storageProfile().osDisk().diffDiskSettings().withOption(DiffDiskOptions.LOCAL); withOSDiskCaching(CachingTypes.READ_ONLY); return this; } @Override public UnmanagedDataDiskImpl defineUnmanagedDataDisk(String name) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return 
UnmanagedDataDiskImpl.prepareDataDisk(name, this); } @Override public VirtualMachineImpl withNewUnmanagedDataDisk(Integer sizeInGB) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return defineUnmanagedDataDisk(null).withNewVhd(sizeInGB).attach(); } @Override public VirtualMachineImpl withExistingUnmanagedDataDisk( String storageAccountName, String containerName, String vhdName) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return defineUnmanagedDataDisk(null).withExistingVhd(storageAccountName, containerName, vhdName).attach(); } @Override public VirtualMachineImpl withoutUnmanagedDataDisk(String name) { int idx = -1; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { idx++; if (dataDisk.name().equalsIgnoreCase(name)) { this.unmanagedDataDisks.remove(idx); this.innerModel().storageProfile().dataDisks().remove(idx); break; } } return this; } @Override public VirtualMachineImpl withoutUnmanagedDataDisk(int lun) { int idx = -1; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { idx++; if (dataDisk.lun() == lun) { this.unmanagedDataDisks.remove(idx); this.innerModel().storageProfile().dataDisks().remove(idx); break; } } return this; } @Override public UnmanagedDataDiskImpl updateUnmanagedDataDisk(String name) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_NO_UNMANAGED_DISK_TO_UPDATE); for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.name().equalsIgnoreCase(name)) { return (UnmanagedDataDiskImpl) dataDisk; } } throw logger.logExceptionAsError(new RuntimeException("A data disk with name '" + name + "' not found")); } @Override public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); 
this.managedDataDisks.newDisksToAttach.put(this.addDependency(creatable), new DataDisk().withLun(-1)); return this; } @Override public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .newDisksToAttach .put(this.addDependency(creatable), new DataDisk().withLun(lun).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this.managedDataDisks.implicitDisksToAssociate.add(new DataDisk().withLun(-1).withDiskSizeGB(sizeInGB)); return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); this .managedDataDisks .implicitDisksToAssociate .add(new DataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDisk( int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(lun) .withDiskSizeGB(sizeInGB) .withCaching(cachingType) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, VirtualMachineDiskOptions options) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = null; if 
(options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) { managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(options.storageAccountType()); if (options.isDiskEncryptionSetConfigured()) { managedDiskParameters.withDiskEncryptionSet( new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId())); } } this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(lun) .withDiskSizeGB(sizeInGB) .withCaching(options.cachingTypes()) .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions())) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add(new DataDisk().withLun(-1).withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add(new DataDisk().withLun(lun).withManagedDisk(managedDiskParameters).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withExistingDataDisk(Disk disk, int newSizeInGB, int lun, CachingTypes cachingType) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); this .managedDataDisks .existingDisksToAttach .add( new DataDisk() .withLun(lun) 
.withDiskSizeGB(newSizeInGB) .withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withExistingDataDisk( Disk disk, int newSizeInGB, int lun, VirtualMachineDiskOptions options) { throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withId(disk.id()); if (options.isDiskEncryptionSetConfigured()) { managedDiskParameters.withDiskEncryptionSet( new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId())); } this .managedDataDisks .existingDisksToAttach .add( new DataDisk() .withLun(lun) .withDiskSizeGB(newSizeInGB) .withCaching(options.cachingTypes()) .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions())) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage(int imageLun) { this.managedDataDisks.newDisksFromImage.add(new DataDisk().withLun(imageLun)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage(int imageLun, int newSizeInGB, CachingTypes cachingType) { this .managedDataDisks .newDisksFromImage .add(new DataDisk().withLun(imageLun).withDiskSizeGB(newSizeInGB).withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) { ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(storageAccountType); this .managedDataDisks .newDisksFromImage .add( new DataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withManagedDisk(managedDiskParameters) .withCaching(cachingType)); return this; } @Override public VirtualMachineImpl withNewDataDiskFromImage( int imageLun, int newSizeInGB, VirtualMachineDiskOptions options) { 
throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = null; if (options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) { managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(options.storageAccountType()); if (options.isDiskEncryptionSetConfigured()) { managedDiskParameters.withDiskEncryptionSet( new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId())); } } this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withCaching(options.cachingTypes()) .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions())) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withoutDataDisk(int lun) { if (!isManagedDiskEnabled()) { return this; } this.managedDataDisks.diskLunsToRemove.add(lun); return this; } @Override public VirtualMachineImpl withNewStorageAccount(Creatable<StorageAccount> creatable) { if (this.creatableStorageAccountKey == null) { this.creatableStorageAccountKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withNewStorageAccount(String name) { StorageAccount.DefinitionStages.WithGroup definitionWithGroup = this.storageManager.storageAccounts().define(name).withRegion(this.regionName()); Creatable<StorageAccount> definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return withNewStorageAccount(definitionAfterGroup); } @Override public VirtualMachineImpl withExistingStorageAccount(StorageAccount storageAccount) { this.existingStorageAccountToAssociate = storageAccount; return this; } @Override public VirtualMachineImpl 
withNewAvailabilitySet(Creatable<AvailabilitySet> creatable) { if (this.creatableAvailabilitySetKey == null) { this.creatableAvailabilitySetKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withProximityPlacementGroup(String proximityPlacementGroupId) { this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId)); newProximityPlacementGroupName = null; return this; } @Override public VirtualMachineImpl withNewProximityPlacementGroup( String proximityPlacementGroupName, ProximityPlacementGroupType type) { this.newProximityPlacementGroupName = proximityPlacementGroupName; this.newProximityPlacementGroupType = type; this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withoutProximityPlacementGroup() { this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withNewAvailabilitySet(String name) { AvailabilitySet.DefinitionStages.WithGroup definitionWithGroup = super.myManager.availabilitySets().define(name).withRegion(this.regionName()); AvailabilitySet.DefinitionStages.WithSku definitionWithSku; if (this.creatableGroup != null) { definitionWithSku = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithSku = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } Creatable<AvailabilitySet> creatable; if (isManagedDiskEnabled()) { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.ALIGNED); } else { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.CLASSIC); } return withNewAvailabilitySet(creatable); } @Override public VirtualMachineImpl withExistingAvailabilitySet(AvailabilitySet availabilitySet) { this.existingAvailabilitySetToAssociate = availabilitySet; return this; } @Override public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable) { 
this.creatableSecondaryNetworkInterfaceKeys.add(this.addDependency(creatable)); return this; } @Override public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable, DeleteOptions deleteOptions) { String key = this.addDependency(creatable); this.creatableSecondaryNetworkInterfaceKeys.add(key); if (deleteOptions != null) { this.secondaryNetworkInterfaceDeleteOptions.put(key, deleteOptions); } return this; } @Override public VirtualMachineImpl withExistingSecondaryNetworkInterface(NetworkInterface networkInterface) { this.existingSecondaryNetworkInterfacesToAssociate.add(networkInterface); return this; } @Override public VirtualMachineExtensionImpl defineNewExtension(String name) { return this.virtualMachineExtensions.define(name); } @Override public VirtualMachineImpl withoutSecondaryNetworkInterface(String name) { if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { int idx = -1; for (NetworkInterfaceReference nicReference : this.innerModel().networkProfile().networkInterfaces()) { idx++; if (!nicReference.primary() && name.equalsIgnoreCase(ResourceUtils.nameFromResourceId(nicReference.id()))) { this.innerModel().networkProfile().networkInterfaces().remove(idx); break; } } } return this; } @Override public VirtualMachineExtensionImpl updateExtension(String name) { return this.virtualMachineExtensions.update(name); } @Override public VirtualMachineImpl withoutExtension(String name) { this.virtualMachineExtensions.remove(name); return this; } @Override public VirtualMachineImpl withPlan(PurchasePlan plan) { this.innerModel().withPlan(new Plan()); this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name()); return this; } @Override public VirtualMachineImpl withPromotionalPlan(PurchasePlan plan, String promotionCode) { this.withPlan(plan); this.innerModel().plan().withPromotionCode(promotionCode); return this; } @Override 
public VirtualMachineImpl withUnmanagedDisks() { this.isUnmanagedDiskSelected = true; return this; } @Override public VirtualMachineImpl withBootDiagnosticsOnManagedStorageAccount() { this.bootDiagnosticsHandler.withBootDiagnostics(true); return this; } @Override public VirtualMachineImpl withBootDiagnostics() { this.bootDiagnosticsHandler.withBootDiagnostics(false); return this; } @Override public VirtualMachineImpl withBootDiagnostics(Creatable<StorageAccount> creatable) { this.bootDiagnosticsHandler.withBootDiagnostics(creatable); return this; } @Override public VirtualMachineImpl withBootDiagnostics(String storageAccountBlobEndpointUri) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri); return this; } @Override public VirtualMachineImpl withBootDiagnostics(StorageAccount storageAccount) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount); return this; } @Override public VirtualMachineImpl withoutBootDiagnostics() { this.bootDiagnosticsHandler.withoutBootDiagnostics(); return this; } @Override public VirtualMachineImpl withPriority(VirtualMachinePriorityTypes priority) { this.innerModel().withPriority(priority); return this; } @Override public VirtualMachineImpl withLowPriority() { this.withPriority(VirtualMachinePriorityTypes.LOW); return this; } @Override public VirtualMachineImpl withLowPriority(VirtualMachineEvictionPolicyTypes policy) { this.withLowPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withSpotPriority() { this.withPriority(VirtualMachinePriorityTypes.SPOT); return this; } @Override public VirtualMachineImpl withSpotPriority(VirtualMachineEvictionPolicyTypes policy) { this.withSpotPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withMaxPrice(Double maxPrice) { this.innerModel().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice)); return this; } @Override public 
VirtualMachineImpl withSystemAssignedManagedServiceIdentity() { this.virtualMachineMsiHandler.withLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withoutSystemAssignedManagedServiceIdentity() { this.virtualMachineMsiHandler.withoutLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) { this.virtualMachineMsiHandler.withAccessTo(resourceId, role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole role) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessTo(resourceId, roleDefinitionId); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId); return this; } @Override public VirtualMachineImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) { this.virtualMachineMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity); return this; } @Override public VirtualMachineImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) { this.virtualMachineMsiHandler.withExistingExternalManagedServiceIdentity(identity); return this; } @Override public VirtualMachineImpl withoutUserAssignedManagedServiceIdentity(String identityId) { this.virtualMachineMsiHandler.withoutExternalManagedServiceIdentity(identityId); return this; } @Override public VirtualMachineImpl withLicenseType(String licenseType) { innerModel().withLicenseType(licenseType); return this; } @Override public VirtualMachineImpl enableHibernation() { if (this.innerModel().additionalCapabilities() == 
null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); } this.innerModel().additionalCapabilities().withHibernationEnabled(true); return this; } @Override public VirtualMachineImpl disableHibernation() { if (this.innerModel().additionalCapabilities() == null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); } this.innerModel().additionalCapabilities().withHibernationEnabled(false); return this; } @Override public boolean isManagedDiskEnabled() { if (isOsDiskFromCustomImage(this.innerModel().storageProfile())) { return true; } if (isOSDiskAttachedManaged(this.innerModel().storageProfile().osDisk())) { return true; } if (isOSDiskFromStoredImage(this.innerModel().storageProfile())) { return false; } if (isOSDiskAttachedUnmanaged(this.innerModel().storageProfile().osDisk())) { return false; } if (isOSDiskFromPlatformImage(this.innerModel().storageProfile())) { if (this.isUnmanagedDiskSelected) { return false; } } if (isInCreateMode()) { return true; } else { return this.innerModel().storageProfile().osDisk().vhd() == null; } } @Override public String computerName() { if (innerModel().osProfile() == null) { return null; } return innerModel().osProfile().computerName(); } @Override public VirtualMachineSizeTypes size() { return innerModel().hardwareProfile().vmSize(); } @Override public OperatingSystemTypes osType() { if (innerModel().storageProfile().osDisk().osType() != null) { return innerModel().storageProfile().osDisk().osType(); } if (innerModel().osProfile() != null) { if (innerModel().osProfile().linuxConfiguration() != null) { return OperatingSystemTypes.LINUX; } if (innerModel().osProfile().windowsConfiguration() != null) { return OperatingSystemTypes.WINDOWS; } } return null; } @Override public String osUnmanagedDiskVhdUri() { if (isManagedDiskEnabled() || this.storageProfile().osDisk().vhd() == null) { return null; } return innerModel().storageProfile().osDisk().vhd().uri(); } @Override public 
CachingTypes osDiskCachingType() { return innerModel().storageProfile().osDisk().caching(); } @Override public int osDiskSize() { return ResourceManagerUtils.toPrimitiveInt(innerModel().storageProfile().osDisk().diskSizeGB()); } @Override public StorageAccountTypes osDiskStorageAccountType() { if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) { return null; } return this.storageProfile().osDisk().managedDisk().storageAccountType(); } @Override public String osDiskId() { if (!isManagedDiskEnabled()) { return null; } return this.storageProfile().osDisk().managedDisk().id(); } @Override public DeleteOptions osDiskDeleteOptions() { if (!isManagedDiskEnabled() || this.storageProfile().osDisk().deleteOption() == null) { return null; } return DeleteOptions.fromString(this.storageProfile().osDisk().deleteOption().toString()); } @Override public String osDiskDiskEncryptionSetId() { if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null || this.storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) { return null; } return this.storageProfile().osDisk().managedDisk().diskEncryptionSet().id(); } @Override public boolean isOSDiskEphemeral() { return this.storageProfile().osDisk().diffDiskSettings() != null && this.storageProfile().osDisk().diffDiskSettings().placement() != null; } @Override public Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks() { Map<Integer, VirtualMachineUnmanagedDataDisk> dataDisks = new HashMap<>(); if (!isManagedDiskEnabled()) { for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { dataDisks.put(dataDisk.lun(), dataDisk); } } return Collections.unmodifiableMap(dataDisks); } @Override public Map<Integer, VirtualMachineDataDisk> dataDisks() { Map<Integer, VirtualMachineDataDisk> dataDisks = new HashMap<>(); if (isManagedDiskEnabled()) { List<DataDisk> innerDataDisks = this.innerModel().storageProfile().dataDisks(); if (innerDataDisks != null) 
{ for (DataDisk innerDataDisk : innerDataDisks) { dataDisks.put(innerDataDisk.lun(), new VirtualMachineDataDiskImpl(innerDataDisk)); } } } return Collections.unmodifiableMap(dataDisks); } @Override public NetworkInterface getPrimaryNetworkInterface() { return this.getPrimaryNetworkInterfaceAsync().block(); } @Override public Mono<NetworkInterface> getPrimaryNetworkInterfaceAsync() { return this.networkManager.networkInterfaces().getByIdAsync(primaryNetworkInterfaceId()); } @Override public PublicIpAddress getPrimaryPublicIPAddress() { return this.getPrimaryNetworkInterface().primaryIPConfiguration().getPublicIpAddress(); } @Override public String getPrimaryPublicIPAddressId() { return this.getPrimaryNetworkInterface().primaryIPConfiguration().publicIpAddressId(); } @Override public List<String> networkInterfaceIds() { List<String> nicIds = new ArrayList<>(); for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) { nicIds.add(nicRef.id()); } return nicIds; } @Override public String primaryNetworkInterfaceId() { final List<NetworkInterfaceReference> nicRefs = this.innerModel().networkProfile().networkInterfaces(); String primaryNicRefId = null; if (nicRefs.size() == 1) { primaryNicRefId = nicRefs.get(0).id(); } else if (nicRefs.size() == 0) { primaryNicRefId = null; } else { for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) { if (nicRef.primary() != null && nicRef.primary()) { primaryNicRefId = nicRef.id(); break; } } if (primaryNicRefId == null) { primaryNicRefId = nicRefs.get(0).id(); } } return primaryNicRefId; } @Override public String availabilitySetId() { if (innerModel().availabilitySet() != null) { return innerModel().availabilitySet().id(); } return null; } @Override public String virtualMachineScaleSetId() { if (innerModel().virtualMachineScaleSet() != null) { return innerModel().virtualMachineScaleSet().id(); } return null; } @Override public String provisioningState() { return 
innerModel().provisioningState(); } @Override public String licenseType() { return innerModel().licenseType(); } @Override public ProximityPlacementGroup proximityPlacementGroup() { if (innerModel().proximityPlacementGroup() == null) { return null; } else { ResourceId id = ResourceId.fromString(innerModel().proximityPlacementGroup().id()); ProximityPlacementGroupInner plgInner = manager() .serviceClient() .getProximityPlacementGroups() .getByResourceGroup(id.resourceGroupName(), id.name()); if (plgInner == null) { return null; } else { return new ProximityPlacementGroupImpl(plgInner); } } } @Override public Mono<List<VirtualMachineExtension>> listExtensionsAsync() { return this.virtualMachineExtensions.listAsync(); } @Override public Map<String, VirtualMachineExtension> listExtensions() { return this.virtualMachineExtensions.asMap(); } @Override public Plan plan() { return innerModel().plan(); } @Override public StorageProfile storageProfile() { return innerModel().storageProfile(); } @Override public OSProfile osProfile() { return innerModel().osProfile(); } @Override public DiagnosticsProfile diagnosticsProfile() { return innerModel().diagnosticsProfile(); } @Override public String vmId() { return innerModel().vmId(); } @Override public VirtualMachineInstanceView instanceView() { if (this.virtualMachineInstanceView == null) { this.refreshInstanceView(); } return this.virtualMachineInstanceView; } @Override public Set<AvailabilityZoneId> availabilityZones() { Set<AvailabilityZoneId> zones = new HashSet<>(); if (this.innerModel().zones() != null) { for (String zone : this.innerModel().zones()) { zones.add(AvailabilityZoneId.fromString(zone)); } } return Collections.unmodifiableSet(zones); } @Override public PowerState powerState() { return PowerState.fromInstanceView(this.instanceView()); } @Override public boolean isBootDiagnosticsEnabled() { return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled(); } @Override public String bootDiagnosticsStorageUri() { 
return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri(); } @Override public boolean isManagedServiceIdentityEnabled() { ResourceIdentityType type = this.managedServiceIdentityType(); return type != null && !type.equals(ResourceIdentityType.NONE); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().tenantId(); } return null; } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().principalId(); } return null; } @Override public ResourceIdentityType managedServiceIdentityType() { if (this.innerModel().identity() != null) { return this.innerModel().identity().type(); } return null; } @Override public Set<String> userAssignedManagedServiceIdentityIds() { if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) { return Collections .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet())); } return Collections.unmodifiableSet(new HashSet<String>()); } @Override public BillingProfile billingProfile() { return this.innerModel().billingProfile(); } @Override public boolean isHibernationEnabled() { return this.innerModel().additionalCapabilities() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().additionalCapabilities().hibernationEnabled()); } @Override public SecurityTypes securityType() { SecurityProfile securityProfile = this.innerModel().securityProfile(); if (securityProfile == null) { return null; } return securityProfile.securityType(); } @Override public boolean isSecureBootEnabled() { return securityType() != null && this.innerModel().securityProfile().uefiSettings() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().secureBootEnabled()); } @Override public boolean isVTpmEnabled() { return 
securityType() != null && this.innerModel().securityProfile().uefiSettings() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().vTpmEnabled()); } @Override public OffsetDateTime timeCreated() { return innerModel().timeCreated(); } @Override public DeleteOptions primaryNetworkInterfaceDeleteOptions() { String nicId = primaryNetworkInterfaceId(); return networkInterfaceDeleteOptions(nicId); } @Override public DeleteOptions networkInterfaceDeleteOptions(String networkInterfaceId) { if (CoreUtils.isNullOrEmpty(networkInterfaceId) || this.innerModel().networkProfile() == null || this.innerModel().networkProfile().networkInterfaces() == null) { return null; } return this.innerModel().networkProfile() .networkInterfaces() .stream() .filter(nic -> networkInterfaceId.equalsIgnoreCase(nic.id())) .findAny() .map(NetworkInterfaceReference::deleteOption) .orElse(null); } @Override public VirtualMachinePriorityTypes priority() { return this.innerModel().priority(); } @Override public VirtualMachineEvictionPolicyTypes evictionPolicy() { return this.innerModel().evictionPolicy(); } @Override public String userData() { return this.innerModel().userData(); } @Override public void beforeGroupCreateOrUpdate() { if (creatableStorageAccountKey == null && existingStorageAccountToAssociate == null) { if (osDiskRequiresImplicitStorageAccountCreation() || dataDisksRequiresImplicitStorageAccountCreation()) { Creatable<StorageAccount> storageAccountCreatable = null; if (this.creatableGroup != null) { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withNewResourceGroup(this.creatableGroup); } else { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withExistingResourceGroup(this.resourceGroupName()); } 
this.creatableStorageAccountKey = this.addDependency(storageAccountCreatable); } } this.bootDiagnosticsHandler.prepare(); } @Override public Mono<VirtualMachine> createResourceAsync() { return prepareCreateResourceAsync() .flatMap( virtualMachine -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateAsync(resourceGroupName(), vmName, innerModel()) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; })); } private Mono<VirtualMachine> prepareCreateResourceAsync() { setOSDiskDefaults(); setOSProfileDefaults(); setHardwareProfileDefaults(); if (isManagedDiskEnabled()) { managedDataDisks.setDataDisksDefaults(); } else { UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName); } this.handleUnManagedOSAndDataDisksStorageSettings(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.handleNetworkSettings(); return this .createNewProximityPlacementGroupAsync() .map( virtualMachine -> { this.handleAvailabilitySettings(); this.virtualMachineMsiHandler.processCreatedExternalIdentities(); this.virtualMachineMsiHandler.handleExternalIdentities(); return virtualMachine; }); } @Override public Mono<VirtualMachine> updateResourceAsync() { if (isManagedDiskEnabled()) { managedDataDisks.setDataDisksDefaults(); } else { UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName); } this.handleUnManagedOSAndDataDisksStorageSettings(); this.bootDiagnosticsHandler.handleDiagnosticsSettings(); this.handleNetworkSettings(); this.handleAvailabilitySettings(); this.virtualMachineMsiHandler.processCreatedExternalIdentities(); VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner(); this.copyInnerToUpdateParameter(updateParameter); this.virtualMachineMsiHandler.handleExternalIdentities(updateParameter); final boolean vmModified = this.isVirtualMachineModifiedDuringUpdate(updateParameter); if (vmModified) { return this .manager() .serviceClient() .getVirtualMachines() 
.updateAsync(resourceGroupName(), vmName, updateParameter)
                .map(virtualMachineInner -> {
                    // Refresh local state from the service response before returning the fluent model.
                    reset(virtualMachineInner);
                    return this;
                });
        } else {
            // Nothing changed since the snapshot taken on update start — skip the service call.
            return Mono.just(this);
        }
    }

    // Post-create/update hook: extension child resources are always cleared; the VM is only
    // refreshed when the dependency group completed successfully.
    @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) {
        this.virtualMachineExtensions.clear();
        if (isGroupFaulted) {
            return Mono.empty();
        } else {
            return this.refreshAsync().then();
        }
    }

    // Registers an extension definition created through the fluent child-resource flow.
    VirtualMachineImpl withExtension(VirtualMachineExtensionImpl extension) {
        this.virtualMachineExtensions.addExtension(extension);
        return this;
    }

    // Resets this wrapper around a freshly fetched inner model and drops all pending
    // create/update bookkeeping (cached views, data disks, MSI state, NIC intents).
    private void reset(VirtualMachineInner inner) {
        this.setInner(inner);
        clearCachedRelatedResources();
        initializeDataDisks();
        virtualMachineMsiHandler.clear();
        creatableSecondaryNetworkInterfaceKeys.clear();
        existingSecondaryNetworkInterfacesToAssociate.clear();
        secondaryNetworkInterfaceDeleteOptions.clear();
        primaryNetworkInterfaceDeleteOptions = null;
    }

    // Adds an unmanaged (blob-backed) data disk to both the inner model and the local list.
    VirtualMachineImpl withUnmanagedDataDisk(UnmanagedDataDiskImpl dataDisk) {
        this.innerModel().storageProfile().dataDisks().add(dataDisk.innerModel());
        this.unmanagedDataDisks.add(dataDisk);
        return this;
    }

    // Availability zones can only be set at create time; an implicitly created public IP must be
    // zoned, STANDARD sku and static to be compatible with a zoned VM.
    @Override public VirtualMachineImpl withAvailabilityZone(AvailabilityZoneId zoneId) {
        if (isInCreateMode()) {
            if (this.innerModel().zones() == null) {
                this.innerModel().withZones(new ArrayList<String>());
            }
            this.innerModel().zones().add(zoneId.toString());
            if (this.implicitPipCreatable != null) {
                this.implicitPipCreatable
                    .withAvailabilityZone(zoneId)
                    .withSku(PublicIPSkuType.STANDARD)
                    .withStaticIP();
            }
        }
        return this;
    }

    @Override public VirtualMachineImpl withOsDiskDeleteOptions(DeleteOptions deleteOptions) {
        if (deleteOptions == null
            || this.innerModel().storageProfile() == null
            || this.innerModel().storageProfile().osDisk() == null) {
            // NOTE(review): returning null from a fluent wither breaks method chaining (NPE at the
            // caller); the sibling withers return `this` on no-op — confirm whether null is intended.
            return null;
        }
        this.innerModel().storageProfile().osDisk().withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions));
        return this;
    }

    @Override public VirtualMachineImpl withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions deleteOptions) {
this.primaryNetworkInterfaceDeleteOptions = deleteOptions; return this; } @Override public VirtualMachineImpl withNetworkInterfacesDeleteOptions(DeleteOptions deleteOptions, String... nicIds) { if (nicIds == null || nicIds.length == 0) { throw new IllegalArgumentException("No nicIds specified for `withNetworkInterfacesDeleteOptions`"); } if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { Set<String> nicIdSet = Arrays.stream(nicIds).map(nicId -> nicId.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()); this.innerModel().networkProfile().networkInterfaces().forEach( nic -> { if (nicIdSet.contains(nic.id().toLowerCase(Locale.ROOT))) { nic.withDeleteOption(deleteOptions); } } ); } return this; } @Override public VirtualMachineImpl withDataDisksDeleteOptions(DeleteOptions deleteOptions, Integer... luns) { if (luns == null || luns.length == 0) { throw new IllegalArgumentException("No luns specified for `withDataDisksDeleteOptions`"); } Set<Integer> lunSet = Arrays.stream(luns).filter(Objects::nonNull).collect(Collectors.toSet()); if (lunSet.isEmpty()) { throw new IllegalArgumentException("No non-null luns specified for `withDataDisksDeleteOptions`"); } if (this.innerModel().storageProfile() != null && this.innerModel().storageProfile().dataDisks() != null) { this.innerModel().storageProfile().dataDisks().forEach( dataDisk -> { if (lunSet.contains(dataDisk.lun())) { dataDisk.withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); } } ); } return this; } AzureEnvironment environment() { return manager().environment(); } private void setOSDiskDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (isOSDiskFromImage(osDisk)) { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() == null) { osDisk.withManagedDisk(new ManagedDiskParameters()); } if (osDisk.managedDisk().storageAccountType() == 
null) { osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS); } osDisk.withVhd(null); } else { if (isOSDiskFromPlatformImage(storageProfile) || isOSDiskFromStoredImage(storageProfile)) { if (osDisk.vhd() == null) { String osDiskVhdContainerName = "vhds"; String osDiskVhdName = this.vmName + "-os-disk-" + UUID.randomUUID().toString() + ".vhd"; withOSDiskVhdLocation(osDiskVhdContainerName, osDiskVhdName); } osDisk.withManagedDisk(null); } if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } else { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() != null) { osDisk.managedDisk().withStorageAccountType(null); } osDisk.withVhd(null); } else { osDisk.withManagedDisk(null); if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } if (osDisk.caching() == null) { withOSDiskCaching(CachingTypes.READ_WRITE); } } private void setOSProfileDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!removeOsProfile && isOSDiskFromImage(osDisk)) { if (osDisk.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration() == null) { osProfile.withLinuxConfiguration(new LinuxConfiguration()); } this .innerModel() .osProfile() .linuxConfiguration() .withDisablePasswordAuthentication(osProfile.adminPassword() == null); } if (this.innerModel().osProfile().computerName() == null) { if (vmName.matches("[0-9]+")) { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } else if (vmName.length() <= 15) { this.innerModel().osProfile().withComputerName(vmName); } else { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } } } else { this.innerModel().withOsProfile(null); } } private void setHardwareProfileDefaults() { if (!isInCreateMode()) { return; } HardwareProfile 
hardwareProfile = this.innerModel().hardwareProfile(); if (hardwareProfile.vmSize() == null) { hardwareProfile.withVmSize(VirtualMachineSizeTypes.BASIC_A0); } } /** Prepare virtual machine disks profile (StorageProfile). */ private void handleUnManagedOSAndDataDisksStorageSettings() { if (isManagedDiskEnabled()) { return; } StorageAccount storageAccount = null; if (this.creatableStorageAccountKey != null) { storageAccount = this.taskResult(this.creatableStorageAccountKey); } else if (this.existingStorageAccountToAssociate != null) { storageAccount = this.existingStorageAccountToAssociate; } if (isInCreateMode()) { if (storageAccount != null) { if (isOSDiskFromPlatformImage(innerModel().storageProfile())) { String uri = innerModel() .storageProfile() .osDisk() .vhd() .uri() .replaceFirst("\\{storage-base-url}", storageAccount.endPoints().primary().blob()); innerModel().storageProfile().osDisk().vhd().withUri(uri); } UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } } else { if (storageAccount != null) { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } else { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, vmName); } } } private Mono<VirtualMachineImpl> createNewProximityPlacementGroupAsync() { if (isInCreateMode()) { if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) { ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner(); plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType); plgInner.withLocation(this.innerModel().location()); return this .manager() .serviceClient() .getProximityPlacementGroups() .createOrUpdateAsync(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner) .map( createdPlgInner -> { this .innerModel() .withProximityPlacementGroup(new SubResource().withId(createdPlgInner.id())); return this; }); } } return Mono.just(this); } private void 
handleNetworkSettings() { if (isInCreateMode()) { NetworkInterface primaryNetworkInterface = null; if (this.creatablePrimaryNetworkInterfaceKey != null) { primaryNetworkInterface = this.taskResult(this.creatablePrimaryNetworkInterfaceKey); } else if (this.existingPrimaryNetworkInterfaceToAssociate != null) { primaryNetworkInterface = this.existingPrimaryNetworkInterfaceToAssociate; } if (primaryNetworkInterface != null) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(true); nicReference.withId(primaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } if (this.primaryNetworkInterfaceDeleteOptions != null) { String primaryNetworkInterfaceId = primaryNetworkInterfaceId(); if (primaryNetworkInterfaceId != null) { this.innerModel().networkProfile().networkInterfaces().stream() .filter(nic -> primaryNetworkInterfaceId.equals(nic.id())) .forEach(nic -> nic.withDeleteOption(this.primaryNetworkInterfaceDeleteOptions)); } } for (String creatableSecondaryNetworkInterfaceKey : this.creatableSecondaryNetworkInterfaceKeys) { NetworkInterface secondaryNetworkInterface = this.taskResult(creatableSecondaryNetworkInterfaceKey); NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); if (secondaryNetworkInterfaceDeleteOptions.containsKey(creatableSecondaryNetworkInterfaceKey)) { DeleteOptions deleteOptions = secondaryNetworkInterfaceDeleteOptions.get(creatableSecondaryNetworkInterfaceKey); nicReference.withDeleteOption(deleteOptions); } this.innerModel().networkProfile().networkInterfaces().add(nicReference); } for (NetworkInterface secondaryNetworkInterface : this.existingSecondaryNetworkInterfacesToAssociate) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); 
this.innerModel().networkProfile().networkInterfaces().add(nicReference); } }

    // Associates the availability set (created as a dependency or supplied as existing) with the
    // VM; only applies at create time — availability set cannot be changed on update here.
    private void handleAvailabilitySettings() {
        if (!isInCreateMode()) {
            return;
        }
        AvailabilitySet availabilitySet = null;
        if (this.creatableAvailabilitySetKey != null) {
            availabilitySet = this.taskResult(this.creatableAvailabilitySetKey);
        } else if (this.existingAvailabilitySetToAssociate != null) {
            availabilitySet = this.existingAvailabilitySetToAssociate;
        }
        if (availabilitySet != null) {
            if (this.innerModel().availabilitySet() == null) {
                this.innerModel().withAvailabilitySet(new SubResource());
            }
            this.innerModel().availabilitySet().withId(availabilitySet.id());
        }
    }

    // True when an unmanaged OS disk sourced from a platform image needs a storage account created
    // implicitly (no account was chosen explicitly and the VM is being created).
    private boolean osDiskRequiresImplicitStorageAccountCreation() {
        if (isManagedDiskEnabled()) {
            return false;
        }
        if (this.creatableStorageAccountKey != null
            || this.existingStorageAccountToAssociate != null
            || !isInCreateMode()) {
            return false;
        }
        return isOSDiskFromPlatformImage(this.innerModel().storageProfile());
    }

    // True when at least one unmanaged data disk (EMPTY or FROM_IMAGE, with no VHD location yet)
    // needs an implicitly created storage account. On update, an already ATTACHed disk with a VHD
    // implies an existing account can be reused, so no new account is required.
    private boolean dataDisksRequiresImplicitStorageAccountCreation() {
        if (isManagedDiskEnabled()) {
            return false;
        }
        if (this.creatableStorageAccountKey != null
            || this.existingStorageAccountToAssociate != null
            || this.unmanagedDataDisks.size() == 0) {
            return false;
        }
        boolean hasEmptyVhd = false;
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            if (dataDisk.creationMethod() == DiskCreateOptionTypes.EMPTY
                || dataDisk.creationMethod() == DiskCreateOptionTypes.FROM_IMAGE) {
                if (dataDisk.innerModel().vhd() == null) {
                    hasEmptyVhd = true;
                    break;
                }
            }
        }
        if (isInCreateMode()) {
            return hasEmptyVhd;
        }
        if (hasEmptyVhd) {
            for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
                if (dataDisk.creationMethod() == DiskCreateOptionTypes.ATTACH
                    && dataDisk.innerModel().vhd() != null) {
                    return false;
                }
            }
            return true;
        }
        return false;
    }

    /**
     * Checks whether the OS disk is directly attached to a unmanaged VHD.
 *
     * @param osDisk the osDisk value in the storage profile
     * @return true if the OS disk is attached to a unmanaged VHD, false otherwise
     */
    private boolean isOSDiskAttachedUnmanaged(OSDisk osDisk) {
        return osDisk.createOption() == DiskCreateOptionTypes.ATTACH
            && osDisk.vhd() != null
            && osDisk.vhd().uri() != null;
    }

    /**
     * Checks whether the OS disk is directly attached to a managed disk.
     *
     * @param osDisk the osDisk value in the storage profile
     * @return true if the OS disk is attached to a managed disk, false otherwise
     */
    private boolean isOSDiskAttachedManaged(OSDisk osDisk) {
        return osDisk.createOption() == DiskCreateOptionTypes.ATTACH
            && osDisk.managedDisk() != null
            && osDisk.managedDisk().id() != null;
    }

    /**
     * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]).
     *
     * @param osDisk the osDisk value in the storage profile
     * @return true if the OS disk is configured to use image from PIR or custom image
     */
    private boolean isOSDiskFromImage(OSDisk osDisk) {
        return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE;
    }

    /**
     * Checks whether the OS disk is based on an platform image (image in PIR).
     *
     * <p>Distinguished from a custom image by requiring the full publisher/offer/sku/version
     * reference rather than an image resource id.</p>
     *
     * @param storageProfile the storage profile
     * @return true if the OS disk is configured to be based on platform image.
     */
    private boolean isOSDiskFromPlatformImage(StorageProfile storageProfile) {
        ImageReference imageReference = storageProfile.imageReference();
        return isOSDiskFromImage(storageProfile.osDisk())
            && imageReference != null
            && imageReference.publisher() != null
            && imageReference.offer() != null
            && imageReference.sku() != null
            && imageReference.version() != null;
    }

    /**
     * Checks whether the OS disk is based on a CustomImage.
     *
     * <p>A custom image is represented by {@link VirtualMachineCustomImage}.
     *
     * @param storageProfile the storage profile
     * @return true if the OS disk is configured to be based on custom image.
*/ private boolean isOsDiskFromCustomImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null; } /** * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature'). * * <p>A stored image is created by calling {@link VirtualMachine * * @param storageProfile the storage profile * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature') */ private boolean isOSDiskFromStoredImage(StorageProfile storageProfile) { OSDisk osDisk = storageProfile.osDisk(); return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null; } private String temporaryBlobUrl(String containerName, String blobName) { return "{storage-base-url}" + containerName + "/" + blobName; } private NetworkInterface.DefinitionStages.WithPrimaryPublicIPAddress prepareNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionWithNetwork; if (this.creatableGroup != null) { definitionWithNetwork = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithNetwork = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionWithNetwork.withNewPrimaryNetwork("vnet" + name).withPrimaryPrivateIPAddressDynamic(); } private void initializeDataDisks() { if (this.innerModel().storageProfile().dataDisks() == null) { this.innerModel().storageProfile().withDataDisks(new ArrayList<>()); } this.isUnmanagedDiskSelected = false; this.managedDataDisks.clear(); this.unmanagedDataDisks = new ArrayList<>(); if (!isManagedDiskEnabled()) { for (DataDisk dataDiskInner : this.storageProfile().dataDisks()) { 
this.unmanagedDataDisks.add(new UnmanagedDataDiskImpl(dataDiskInner, this)); } } }

    // Starts a NIC definition in this VM's region, attached to either the creatable resource
    // group (when the VM creates one) or the VM's existing resource group.
    private NetworkInterface.DefinitionStages.WithPrimaryNetwork preparePrimaryNetworkInterface(String name) {
        NetworkInterface.DefinitionStages.WithGroup definitionWithGroup =
            this.networkManager.networkInterfaces().define(name).withRegion(this.regionName());
        NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionAfterGroup;
        if (this.creatableGroup != null) {
            definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
        } else {
            definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
        }
        return definitionAfterGroup;
    }

    // Drops cached lazily-fetched state so the next accessor re-fetches it.
    private void clearCachedRelatedResources() {
        this.virtualMachineInstanceView = null;
    }

    // Guard: operation is only valid for unmanaged-disk VMs.
    private void throwIfManagedDiskEnabled(String message) {
        if (this.isManagedDiskEnabled()) {
            throw logger.logExceptionAsError(new UnsupportedOperationException(message));
        }
    }

    // Guard: operation is only valid for managed-disk VMs.
    private void throwIfManagedDiskDisabled(String message) {
        if (!this.isManagedDiskEnabled()) {
            throw logger.logExceptionAsError(new UnsupportedOperationException(message));
        }
    }

    private boolean isInUpdateMode() {
        return !this.isInCreateMode();
    }

    // Converts the resource-level DeleteOptions enum to the disk-specific enum (null-safe).
    private DiskDeleteOptionTypes diskDeleteOptionsFromDeleteOptions(DeleteOptions deleteOptions) {
        return deleteOptions == null
            ? null
            : DiskDeleteOptionTypes.fromString(deleteOptions.toString());
    }

    // Detects user modifications between update start and update apply by comparing JSON
    // serializations against the snapshot taken when update began. Any serialization failure is
    // treated conservatively as "modified" so the update request is still sent.
    boolean isVirtualMachineModifiedDuringUpdate(VirtualMachineUpdateInner updateParameter) {
        if (updateParameterSnapshotOnUpdate == null || updateParameter == null) {
            return true;
        } else {
            try {
                String jsonStrSnapshot =
                    SERIALIZER_ADAPTER.serialize(updateParameterSnapshotOnUpdate, SerializerEncoding.JSON);
                String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON);
                return !jsonStr.equals(jsonStrSnapshot);
            } catch (IOException e) {
                return true;
            }
        }
    }

    // Produces a deep copy of the update payload via a JSON round-trip; the identity is re-created
    // with only its type because the serialized copy would otherwise carry read-only fields.
    // NOTE(review): returns null on serialization failure — callers must tolerate a null snapshot.
    VirtualMachineUpdateInner deepCopyInnerToUpdateParameter() {
        VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();
        copyInnerToUpdateParameter(updateParameter);
        try {
            String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON);
            updateParameter =
                SERIALIZER_ADAPTER.deserialize(jsonStr, VirtualMachineUpdateInner.class, SerializerEncoding.JSON);
        } catch (IOException e) {
            return null;
        }
        if (this.innerModel().identity() != null) {
            VirtualMachineIdentity identity = new VirtualMachineIdentity();
            identity.withType(this.innerModel().identity().type());
            updateParameter.withIdentity(identity);
        }
        return updateParameter;
    }

    // Shallow-copies the updatable portions of the full inner model into the PATCH payload.
    private void copyInnerToUpdateParameter(VirtualMachineUpdateInner updateParameter) {
        updateParameter.withHardwareProfile(this.innerModel().hardwareProfile());
        updateParameter.withStorageProfile(this.innerModel().storageProfile());
        updateParameter.withOsProfile(this.innerModel().osProfile());
        updateParameter.withNetworkProfile(this.innerModel().networkProfile());
        updateParameter.withDiagnosticsProfile(this.innerModel().diagnosticsProfile());
        updateParameter.withBillingProfile(this.innerModel().billingProfile());
        updateParameter.withSecurityProfile(this.innerModel().securityProfile());
        updateParameter.withAdditionalCapabilities(this.innerModel().additionalCapabilities());
        updateParameter.withAvailabilitySet(this.innerModel().availabilitySet());
updateParameter.withLicenseType(this.innerModel().licenseType()); updateParameter.withZones(this.innerModel().zones()); updateParameter.withTags(this.innerModel().tags()); updateParameter.withProximityPlacementGroup(this.innerModel().proximityPlacementGroup()); updateParameter.withPriority(this.innerModel().priority()); updateParameter.withEvictionPolicy(this.innerModel().evictionPolicy()); updateParameter.withUserData(this.innerModel().userData()); } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } @Override public VirtualMachineImpl withPlacement(DiffDiskPlacement placement) { if (placement != null) { this.innerModel().storageProfile().osDisk().diffDiskSettings().withPlacement(placement); } return this; } @Override public VirtualMachineImpl withExistingVirtualMachineScaleSet(VirtualMachineScaleSet scaleSet) { if (scaleSet != null) { this.innerModel().withVirtualMachineScaleSet(new SubResource().withId(scaleSet.id())); } return this; } @Override public VirtualMachineImpl withOSDisk(String diskId) { if (diskId == null) { return this; } if (!isManagedDiskEnabled() || this.innerModel().storageProfile().osDisk().managedDisk() == null) { return this; } OSDisk osDisk = new OSDisk() .withCreateOption(this.innerModel().storageProfile().osDisk().createOption()); osDisk.withManagedDisk(new ManagedDiskParameters().withId(diskId)); this.storageProfile().withOsDisk(osDisk); this.storageProfile().osDisk().managedDisk().withId(diskId); return this; } @Override public VirtualMachineImpl withOSDisk(Disk disk) { if (disk == null) { return this; } return withOSDisk(disk.id()); } @Override public VirtualMachineImpl withTrustedLaunch() { 
ensureSecurityProfile().withSecurityType(SecurityTypes.TRUSTED_LAUNCH); return this; }

    // Secure Boot / vTPM withers are silent no-ops unless a security type (e.g. TrustedLaunch)
    // has already been set — UEFI settings are meaningless without one.
    @Override public VirtualMachineImpl withSecureBoot() {
        if (securityType() == null) {
            return this;
        }
        ensureUefiSettings().withSecureBootEnabled(true);
        return this;
    }

    @Override public VirtualMachineImpl withoutSecureBoot() {
        if (securityType() == null) {
            return this;
        }
        ensureUefiSettings().withSecureBootEnabled(false);
        return this;
    }

    @Override public VirtualMachineImpl withVTpm() {
        if (securityType() == null) {
            return this;
        }
        ensureUefiSettings().withVTpmEnabled(true);
        return this;
    }

    @Override public VirtualMachineImpl withoutVTpm() {
        if (securityType() == null) {
            return this;
        }
        ensureUefiSettings().withVTpmEnabled(false);
        return this;
    }

    // Lazily creates the SecurityProfile on the inner model and returns it.
    private SecurityProfile ensureSecurityProfile() {
        SecurityProfile securityProfile = this.innerModel().securityProfile();
        if (securityProfile == null) {
            securityProfile = new SecurityProfile();
            this.innerModel().withSecurityProfile(securityProfile);
        }
        return securityProfile;
    }

    // Lazily creates the UefiSettings under the (possibly also lazily created) SecurityProfile.
    private UefiSettings ensureUefiSettings() {
        UefiSettings uefiSettings = ensureSecurityProfile().uefiSettings();
        if (uefiSettings == null) {
            uefiSettings = new UefiSettings();
            ensureSecurityProfile().withUefiSettings(uefiSettings);
        }
        return uefiSettings;
    }

    /** Class to manage Data disk collection.
*/ private class ManagedDataDiskCollection { private final Map<String, DataDisk> newDisksToAttach = new HashMap<>(); private final List<DataDisk> existingDisksToAttach = new ArrayList<>(); private final List<DataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<DataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineImpl vm; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; private DiskDeleteOptionTypes defaultDeleteOptions; private DiskEncryptionSetParameters defaultDiskEncryptionSet; ManagedDataDiskCollection(VirtualMachineImpl vm) { this.vm = vm; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultDeleteOptions(DiskDeleteOptionTypes deleteOptions) { this.defaultDeleteOptions = deleteOptions; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDefaultEncryptionSet(String diskEncryptionSetId) { this.defaultDiskEncryptionSet = new DiskEncryptionSetParameters().withId(diskEncryptionSetId); } void setDataDisksDefaults() { VirtualMachineInner vmInner = this.vm.innerModel(); if (isPending()) { if (vmInner.storageProfile().dataDisks() == null) { vmInner.storageProfile().withDataDisks(new ArrayList<>()); } List<DataDisk> dataDisks = vmInner.storageProfile().dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksToAttach.values()) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.existingDisksToAttach) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { 
usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setAttachableNewDataDisks(nextLun); setAttachableExistingDataDisks(nextLun); setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (vmInner.storageProfile().dataDisks() != null && vmInner.storageProfile().dataDisks().size() == 0) { if (vm.isInCreateMode()) { vmInner.storageProfile().withDataDisks(null); } } this.clear(); } private void clear() { newDisksToAttach.clear(); existingDisksToAttach.clear(); implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); defaultCachingType = null; defaultStorageAccountType = null; defaultDeleteOptions = null; defaultDiskEncryptionSet = null; } private boolean isPending() { return newDisksToAttach.size() > 0 || existingDisksToAttach.size() > 0 || implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void setDefaultDiskEncryptionSetOptions(DataDisk dataDisk) { if (getDefaultDiskEncryptionSetOptions() != null) { if (dataDisk.managedDisk() != null && dataDisk.managedDisk().diskEncryptionSet() != null) { if (dataDisk.managedDisk().diskEncryptionSet().id() == null) { dataDisk.managedDisk().withDiskEncryptionSet(null); } } else { if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } dataDisk.managedDisk().withDiskEncryptionSet(getDefaultDiskEncryptionSetOptions()); } } } private void setAttachableNewDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Map.Entry<String, DataDisk> entry : this.newDisksToAttach.entrySet()) { Disk managedDisk = 
vm.taskResult(entry.getKey()); DataDisk dataDisk = entry.getValue(); dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } dataDisk.withManagedDisk(new ManagedDiskParameters()); dataDisk.managedDisk().withId(managedDisk.id()); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setAttachableExistingDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.existingDisksToAttach) { dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); 
dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } private DiskDeleteOptionTypes getDefaultDeleteOptions() { return defaultDeleteOptions; } private DiskEncryptionSetParameters getDefaultDiskEncryptionSetOptions() { return defaultDiskEncryptionSet; } } /** Class to manage VM boot diagnostics settings. 
*/ private class BootDiagnosticsHandler {
// Owning VM wrapper; all inner-model access goes through it.
private final VirtualMachineImpl vmImpl;
// TaskGroup dependency key of an implicitly created diagnostics storage account, if any.
private String creatableDiagnosticsStorageAccountKey;
// True when boot diagnostics uses an Azure-managed storage account (no explicit storageUri).
private boolean useManagedStorageAccount = false;
// Infers "managed storage account" mode from an inner model that has diagnostics enabled
// but no storage URI set.
BootDiagnosticsHandler(VirtualMachineImpl vmImpl) { this.vmImpl = vmImpl; if (isBootDiagnosticsEnabled() && this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } }
// Whether boot diagnostics is explicitly enabled in the inner model; false when any level is unset.
public boolean isBootDiagnosticsEnabled() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null && this.vmInner().diagnosticsProfile().bootDiagnostics().enabled() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().enabled(); } return false; }
// The configured diagnostics blob endpoint, or null when diagnostics/profile is absent.
public String bootDiagnosticsStorageUri() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri(); } return null; }
// Enable diagnostics; caller chooses managed vs. implicit storage-account mode.
BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; }
// Enable diagnostics backed by a storage account that will be created with the VM.
BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(creatable); return this; }
// Enable diagnostics pointing at an existing blob endpoint URI.
BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { this.enableDisable(true); this.useManagedStorageAccount = false; this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(storageAccountBlobEndpointUri); return this; }
// Enable diagnostics using an existing storage account's primary blob endpoint.
BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); }
// Disable diagnostics and clear the stored URI.
BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; }
// Pre-create hook: when diagnostics is enabled but no storage is chosen anywhere (neither here nor
// on the VM), register an implicit storage-account creatable so a URI can be resolved later.
void prepare() { if
(useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile();
// Nothing to do when diagnostics is absent or a storage URI is already set.
if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; }
// A storage account is already coming from some other path — don't create another one.
if (this.creatableDiagnosticsStorageAccountKey != null || this.vmImpl.creatableStorageAccountKey != null || this.vmImpl.existingStorageAccountToAssociate != null) { return; }
// Random, DNS-safe account name ("stg" prefix, dashes stripped, max 24 chars).
String accountName = this.vmImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmImpl.creatableGroup != null) { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withNewResourceGroup(this.vmImpl.creatableGroup); } else { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withExistingResourceGroup(this.vmImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(storageAccountCreatable); }
// Post-dependency hook: resolve whichever storage account was created/associated and write its
// primary blob endpoint into the boot-diagnostics settings.
void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.vmImpl.creatableStorageAccountKey != null) { storageAccount =
this.vmImpl.<StorageAccount>taskResult(this.vmImpl.creatableStorageAccountKey); } else if (this.vmImpl.existingStorageAccountToAssociate != null) { storageAccount = this.vmImpl.existingStorageAccountToAssociate; }
// prepare() guarantees one of the three sources above; reaching null is a programming error.
if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmInner() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); }
// Shorthand for the VM's inner model.
private VirtualMachineInner vmInner() { return this.vmImpl.innerModel(); }
// Lazily materializes the DiagnosticsProfile/BootDiagnostics nodes, then toggles enablement;
// disabling also clears the storage URI so stale endpoints are not persisted.
private void enableDisable(boolean enable) { if (this.vmInner().diagnosticsProfile() == null) { this.vmInner().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmInner().diagnosticsProfile().bootDiagnostics() == null) { this.vmInner().diagnosticsProfile().withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
The two parameters `ifMatch` and `ifNoneMatch` were added to the method `VirtualMachinesClient.createOrUpdateWithResponseAsync`. The parameter values are passed through to `VirtualMachinesClientImpl.createOrUpdateAsync`. ![image](https://github.com/Azure/azure-sdk-for-java/assets/74638143/726cfd24-36ff-4fdf-aad5-dad299fc734a)
// Begins creating the virtual machine as a long-running operation and returns an
// Accepted<VirtualMachine> wrapping the initial (activation) response, without waiting
// for provisioning to finish.
public Accepted<VirtualMachine> beginCreate() {
    // Hoist the repeated this.manager() lookups into a single local.
    ComputeManager computeManager = this.manager();
    return AcceptedImpl.<VirtualMachine, VirtualMachineInner>newAccepted(
        logger,
        computeManager.serviceClient().getHttpPipeline(),
        computeManager.serviceClient().getDefaultPollInterval(),
        // Initial create/update call; the trailing (null, null) are the ifMatch/ifNoneMatch
        // ETag headers, i.e. an unconditional request.
        () -> computeManager
            .serviceClient()
            .getVirtualMachines()
            .createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null)
            .block(),
        // Wrap the returned inner model back into the fluent VirtualMachine type.
        inner -> new VirtualMachineImpl(
            inner.name(),
            inner,
            computeManager,
            this.storageManager,
            this.networkManager,
            this.authorizationManager),
        VirtualMachineInner.class,
        // Pre-create step: resolve creatable dependencies, then finalize the create payload.
        () -> {
            Flux<Indexable> dependencyTasksAsync =
                taskGroup().invokeDependencyAsync(taskGroup().newInvocationContext());
            dependencyTasksAsync.blockLast();
            prepareCreateResourceAsync().block();
        },
        this::reset,
        Context.NONE);
}
.createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null)
// Begins VM creation as a long-running operation; returns immediately with an Accepted<T>
// exposing the activation response rather than blocking until provisioning completes.
public Accepted<VirtualMachine> beginCreate() { return AcceptedImpl .<VirtualMachine, VirtualMachineInner>newAccepted( logger, this.manager().serviceClient().getHttpPipeline(), this.manager().serviceClient().getDefaultPollInterval(),
// The trailing (null, null) are the ifMatch/ifNoneMatch ETag arguments — unconditional request.
() -> this .manager() .serviceClient() .getVirtualMachines() .createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null) .block(), inner -> new VirtualMachineImpl( inner.name(), inner, this.manager(), this.storageManager, this.networkManager, this.authorizationManager), VirtualMachineInner.class,
// Pre-create step: resolve creatable dependencies, then finalize the create payload.
() -> { Flux<Indexable> dependencyTasksAsync = taskGroup().invokeDependencyAsync(taskGroup().newInvocationContext()); dependencyTasksAsync.blockLast(); prepareCreateResourceAsync().block(); }, this::reset, Context.NONE); }
// Fluent implementation of VirtualMachine; implements every definition/update stage interface so
// one object can back the whole builder chain. (Flattened source layout preserved below.)
class VirtualMachineImpl extends GroupableResourceImpl<VirtualMachine, VirtualMachineInner, VirtualMachineImpl, ComputeManager> implements VirtualMachine, VirtualMachine.DefinitionManagedOrUnmanaged, VirtualMachine.DefinitionManaged, VirtualMachine.DefinitionUnmanaged, VirtualMachine.Update, VirtualMachine.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate, VirtualMachine.UpdateStages.WithSystemAssignedIdentityBasedAccessOrUpdate { private final ClientLogger logger = new ClientLogger(VirtualMachineImpl.class); private final StorageManager storageManager; private final NetworkManager networkManager; private final AuthorizationManager authorizationManager; private final String vmName;
// Generates deterministic-prefix random names for implicitly created resources (NICs, PIPs, ...).
private final IdentifierProvider namer;
// TaskGroup dependency keys for resources that will be created together with this VM.
private String creatableStorageAccountKey; private String creatableAvailabilitySetKey; private String creatablePrimaryNetworkInterfaceKey; private List<String> creatableSecondaryNetworkInterfaceKeys;
// Existing resources chosen by the caller to associate with this VM.
private StorageAccount existingStorageAccountToAssociate; private AvailabilitySet existingAvailabilitySetToAssociate; private NetworkInterface existingPrimaryNetworkInterfaceToAssociate; private List<NetworkInterface> existingSecondaryNetworkInterfacesToAssociate; private VirtualMachineInstanceView virtualMachineInstanceView; private boolean isMarketplaceLinuxImage;
// In-flight NIC definition stages while the primary NIC is being built fluently.
private NetworkInterface.DefinitionStages.WithPrimaryPrivateIP nicDefinitionWithPrivateIp; private NetworkInterface.DefinitionStages.WithPrimaryNetworkSubnet nicDefinitionWithSubnet; private NetworkInterface.DefinitionStages.WithCreate nicDefinitionWithCreate; private VirtualMachineExtensionsImpl virtualMachineExtensions; private boolean isUnmanagedDiskSelected; private List<VirtualMachineUnmanagedDataDisk> unmanagedDataDisks; private final ManagedDataDiskCollection managedDataDisks; private final BootDiagnosticsHandler bootDiagnosticsHandler; private VirtualMachineMsiHandler virtualMachineMsiHandler; private PublicIpAddress.DefinitionStages.WithCreate implicitPipCreatable;
private String newProximityPlacementGroupName; private ProximityPlacementGroupType newProximityPlacementGroupType; private boolean removeOsProfile; private DeleteOptions primaryNetworkInterfaceDeleteOptions; private final Map<String, DeleteOptions> secondaryNetworkInterfaceDeleteOptions = new HashMap<>();
// Snapshot of the update payload taken at update() time, used to detect actual changes.
private VirtualMachineUpdateInner updateParameterSnapshotOnUpdate; private static final SerializerAdapter SERIALIZER_ADAPTER = SerializerFactory.createDefaultManagementSerializerAdapter();
// Wires the cross-service managers and per-VM helper collections/handlers.
VirtualMachineImpl( String name, VirtualMachineInner innerModel, final ComputeManager computeManager, final StorageManager storageManager, final NetworkManager networkManager, final AuthorizationManager authorizationManager) { super(name, innerModel, computeManager); this.storageManager = storageManager; this.networkManager = networkManager; this.authorizationManager = authorizationManager; this.vmName = name; this.isMarketplaceLinuxImage = false; this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.vmName); this.creatableSecondaryNetworkInterfaceKeys = new ArrayList<>(); this.existingSecondaryNetworkInterfacesToAssociate = new ArrayList<>(); this.virtualMachineExtensions = new VirtualMachineExtensionsImpl(computeManager.serviceClient().getVirtualMachineExtensions(), this); this.managedDataDisks = new ManagedDataDiskCollection(this); initializeDataDisks(); this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this); this.virtualMachineMsiHandler = new VirtualMachineMsiHandler(authorizationManager, this); this.newProximityPlacementGroupName = null; this.newProximityPlacementGroupType = null; }
// Capture the current inner state before entering update mode.
@Override public VirtualMachineImpl update() { updateParameterSnapshotOnUpdate = this.deepCopyInnerToUpdateParameter(); return super.update(); }
// Refresh the VM and its cached extension list from the service.
@Override public Mono<VirtualMachine> refreshAsync() { return super .refreshAsync() .map( virtualMachine -> { reset(virtualMachine.innerModel()); virtualMachineExtensions.refresh(); return virtualMachine;
}); } @Override protected Mono<VirtualMachineInner> getInnerAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupAsync(this.resourceGroupName(), this.name()); }
// Power-state operations; sync variants block on their async counterparts.
@Override public void deallocate() { this.deallocateAsync().block(); } @Override public Mono<Void> deallocateAsync() { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name()) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void deallocate(boolean hibernate) { this.deallocateAsync(hibernate).block(); } @Override public Mono<Void> deallocateAsync(boolean hibernate) { return this .manager() .serviceClient() .getVirtualMachines() .deallocateAsync(this.resourceGroupName(), this.name(), hibernate) .map(aVoid -> this.refreshAsync()) .then(); } @Override public void generalize() { this.generalizeAsync().block(); } @Override public Mono<Void> generalizeAsync() { return this .manager() .serviceClient() .getVirtualMachines() .generalizeAsync(this.resourceGroupName(), this.name()); } @Override public void powerOff() { this.powerOffAsync().block(); } @Override public Mono<Void> powerOffAsync() { return this .manager() .serviceClient() .getVirtualMachines() .powerOffAsync(this.resourceGroupName(), this.name(), null); } @Override public void powerOff(boolean skipShutdown) { this.powerOffAsync(skipShutdown).block(); } @Override public Mono<Void> powerOffAsync(boolean skipShutdown) { return this .manager() .serviceClient() .getVirtualMachines() .powerOffAsync(this.resourceGroupName(), this.name(), skipShutdown); } @Override public void restart() { this.restartAsync().block(); } @Override public Mono<Void> restartAsync() { return this.manager().serviceClient().getVirtualMachines().restartAsync(this.resourceGroupName(), this.name()); } @Override public void start() { this.startAsync().block(); } @Override public Mono<Void> startAsync() { return
this.manager().serviceClient().getVirtualMachines().startAsync(this.resourceGroupName(), this.name()); } @Override public void redeploy() { this.redeployAsync().block(); } @Override public Mono<Void> redeployAsync() { return this.manager().serviceClient().getVirtualMachines().redeployAsync(this.resourceGroupName(), this.name()); } @Override public void simulateEviction() { this.simulateEvictionAsync().block(); } @Override public Mono<Void> simulateEvictionAsync() { return this .manager() .serviceClient() .getVirtualMachines() .simulateEvictionAsync(this.resourceGroupName(), this.name()); }
// Converts unmanaged (VHD-based) disks to managed disks, then refreshes the local model.
@Override public void convertToManaged() { this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisks(this.resourceGroupName(), this.name()); this.refresh(); } @Override public Mono<Void> convertToManagedAsync() { return this .manager() .serviceClient() .getVirtualMachines() .convertToManagedDisksAsync(this.resourceGroupName(), this.name()) .flatMap(aVoid -> refreshAsync()) .then(); } @Override public VirtualMachineEncryption diskEncryption() { return new VirtualMachineEncryptionImpl(this); } @Override public PagedIterable<VirtualMachineSize> availableSizes() { return PagedConverter.mapPage(this .manager() .serviceClient() .getVirtualMachines() .listAvailableSizes(this.resourceGroupName(), this.name()), VirtualMachineSizeImpl::new); }
// Captures the (generalized) VM image into blobs and returns the capture result as JSON.
@Override public String capture(String containerName, String vhdPrefix, boolean overwriteVhd) { return this.captureAsync(containerName, vhdPrefix, overwriteVhd).block(); } @Override public Mono<String> captureAsync(String containerName, String vhdPrefix, boolean overwriteVhd) { VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters(); parameters.withDestinationContainerName(containerName); parameters.withOverwriteVhds(overwriteVhd); parameters.withVhdPrefix(vhdPrefix); return this .manager() .serviceClient() .getVirtualMachines() .captureAsync(this.resourceGroupName(), this.name(), parameters) .map(
// (continuation) serialize the capture result to a JSON string; JSON failures are rethrown unchecked.
captureResultInner -> { try { return SerializerUtils.getObjectMapper().writeValueAsString(captureResultInner); } catch (JsonProcessingException ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } }); }
// Instance-view refresh: caches the result in virtualMachineInstanceView (cleared on empty response).
@Override public VirtualMachineInstanceView refreshInstanceView() { return refreshInstanceViewAsync().block(); } @Override public Mono<VirtualMachineInstanceView> refreshInstanceViewAsync() { return this .manager() .serviceClient() .getVirtualMachines() .getByResourceGroupWithResponseAsync( this.resourceGroupName(), this.name(), InstanceViewTypes.INSTANCE_VIEW) .map( inner -> { virtualMachineInstanceView = new VirtualMachineInstanceViewImpl(inner.getValue().instanceView()); return virtualMachineInstanceView; }) .switchIfEmpty( Mono .defer( () -> { virtualMachineInstanceView = null; return Mono.empty(); })); }
// Run-command helpers: delegate to the fluent VirtualMachines collection.
@Override public RunCommandResult runPowerShellScript( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runPowerShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runPowerShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runShellScript(List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public Mono<RunCommandResult> runShellScriptAsync( List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) { return this .manager() .virtualMachines() .runShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters); } @Override public RunCommandResult runCommand(RunCommandInput inputCommand) {
return this.manager().virtualMachines().runCommand(this.resourceGroupName(), this.name(), inputCommand); } @Override public Mono<RunCommandResult> runCommandAsync(RunCommandInput inputCommand) { return this.manager().virtualMachines().runCommandAsync(this.resourceGroupName(), this.name(), inputCommand); }
// Primary-network definition stages: each call advances the in-flight NIC definition held in
// nicDefinitionWithPrivateIp / nicDefinitionWithSubnet / nicDefinitionWithCreate.
@Override public VirtualMachineImpl withNewPrimaryNetwork(Creatable<Network> creatable) { this.nicDefinitionWithPrivateIp = this.preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)).withNewPrimaryNetwork(creatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetwork(String addressSpace) { this.nicDefinitionWithPrivateIp = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) .withNewPrimaryNetwork(addressSpace); return this; } @Override public VirtualMachineImpl withExistingPrimaryNetwork(Network network) { this.nicDefinitionWithSubnet = this .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)) .withExistingPrimaryNetwork(network); return this; } @Override public VirtualMachineImpl withSubnet(String name) { this.nicDefinitionWithPrivateIp = this.nicDefinitionWithSubnet.withSubnet(name); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressDynamic() { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressDynamic(); return this; } @Override public VirtualMachineImpl withPrimaryPrivateIPAddressStatic(String staticPrivateIPAddress) { this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressStatic(staticPrivateIPAddress); return this; } @Override public VirtualMachineImpl withNewPrimaryPublicIPAddress(Creatable<PublicIpAddress> creatable) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(creatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl
withNewPrimaryPublicIPAddress(String leafDnsLabel) { return withNewPrimaryPublicIPAddress(leafDnsLabel, null); }
// Creates an implicit public IP ("pip" prefix) in the VM's region/resource group and attaches it
// to the primary NIC definition. The deleteOptions parameter is currently unused in this body.
public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel, DeleteOptions deleteOptions) { PublicIpAddress.DefinitionStages.WithGroup definitionWithGroup = this .networkManager .publicIpAddresses() .define(this.namer.getRandomName("pip", 15)) .withRegion(this.regionName()); PublicIpAddress.DefinitionStages.WithCreate definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } this.implicitPipCreatable = definitionAfterGroup.withLeafDomainLabel(leafDnsLabel); Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(this.implicitPipCreatable); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withExistingPrimaryPublicIPAddress(PublicIpAddress publicIPAddress) { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withExistingPrimaryPublicIPAddress(publicIPAddress); this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withoutPrimaryPublicIPAddress() { Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate; this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable); return this; } @Override public VirtualMachineImpl withNewPrimaryNetworkInterface(Creatable<NetworkInterface> creatable) { this.creatablePrimaryNetworkInterfaceKey = this.addDependency(creatable); return this; } public VirtualMachineImpl withNewPrimaryNetworkInterface(String name, String publicDnsNameLabel) { Creatable<NetworkInterface> definitionCreatable = prepareNetworkInterface(name).withNewPrimaryPublicIPAddress(publicDnsNameLabel); return
withNewPrimaryNetworkInterface(definitionCreatable); } @Override public VirtualMachineImpl withExistingPrimaryNetworkInterface(NetworkInterface networkInterface) { this.existingPrimaryNetworkInterfaceToAssociate = networkInterface; return this; }
// OS-image selection: stored (user) images set the VHD + OS type; Windows variants also enable
// the VM agent and automatic updates by default.
@Override public VirtualMachineImpl withStoredWindowsImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withStoredLinuxImage(String imageUrl) { VirtualHardDisk userImageVhd = new VirtualHardDisk(); userImageVhd.withUri(imageUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().osDisk().withImage(userImageVhd); this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); return this; } @Override public VirtualMachineImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) { return withSpecificWindowsImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) { return withSpecificLinuxImageVersion(knownImage.imageReference()); } @Override public VirtualMachineImpl withSpecificWindowsImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true); this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecificLinuxImageVersion(ImageReference imageReference) { this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReference); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
// Marketplace Linux images may require plan/agreement handling later in the create flow.
this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withLatestWindowsImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificWindowsImageVersion(imageReference); } @Override public VirtualMachineImpl withLatestLinuxImage(String publisher, String offer, String sku) { ImageReference imageReference = new ImageReference(); imageReference.withPublisher(publisher); imageReference.withOffer(offer); imageReference.withSku(sku); imageReference.withVersion("latest"); return withSpecificLinuxImageVersion(imageReference); }
// Custom/gallery images: "generalized" keeps the OS profile; "specialized" flags it for removal.
@Override public VirtualMachineImpl withGeneralizedWindowsCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration()); this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true); return this; } @Override public VirtualMachineImpl withSpecializedWindowsCustomImage(String customImageId) { this.withGeneralizedWindowsCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedWindowsGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedWindowsCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withGeneralizedLinuxCustomImage(String customImageId) { ImageReference imageReferenceInner = new ImageReference(); imageReferenceInner.withId(customImageId); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); this.innerModel().storageProfile().withImageReference(imageReferenceInner); this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration()); this.isMarketplaceLinuxImage = true; return this; } @Override public VirtualMachineImpl withSpecializedLinuxCustomImage(String customImageId) { this.withGeneralizedLinuxCustomImage(customImageId); this.removeOsProfile = true; return this; } @Override public VirtualMachineImpl withGeneralizedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withGeneralizedLinuxCustomImage(galleryImageVersionId); } @Override public VirtualMachineImpl withSpecializedLinuxGalleryImageVersion(String galleryImageVersionId) { return this.withSpecializedLinuxCustomImage(galleryImageVersionId); }
// Specialized OS disk attach: ATTACH create option, mutually exclusive VHD vs. managed-disk id.
@Override public VirtualMachineImpl withSpecializedOSUnmanagedDisk(String osDiskUrl, OperatingSystemTypes osType) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(osDiskUrl); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH);
this.innerModel().storageProfile().osDisk().withVhd(osVhd); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withManagedDisk(null); return this; } @Override public VirtualMachineImpl withSpecializedOSDisk(Disk disk, OperatingSystemTypes osType) { ManagedDiskParameters diskParametersInner = new ManagedDiskParameters(); diskParametersInner.withId(disk.id()); this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH); this.innerModel().storageProfile().osDisk().withManagedDisk(diskParametersInner); this.innerModel().storageProfile().osDisk().withOsType(osType); this.innerModel().storageProfile().osDisk().withVhd(null); return this; }
// OS-profile credential/config setters.
@Override public VirtualMachineImpl withRootUsername(String rootUserName) { this.innerModel().osProfile().withAdminUsername(rootUserName); return this; } @Override public VirtualMachineImpl withAdminUsername(String adminUserName) { this.innerModel().osProfile().withAdminUsername(adminUserName); return this; }
// Appends an SSH public key under /home/<admin>/.ssh/authorized_keys, creating the SSH config
// node on first use. Assumes a Linux configuration is already present on the OS profile.
@Override public VirtualMachineImpl withSsh(String publicKeyData) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration().ssh() == null) { SshConfiguration sshConfiguration = new SshConfiguration(); sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>()); osProfile.linuxConfiguration().withSsh(sshConfiguration); } SshPublicKey sshPublicKey = new SshPublicKey(); sshPublicKey.withKeyData(publicKeyData); sshPublicKey.withPath("/home/" + osProfile.adminUsername() + "/.ssh/authorized_keys"); osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey); return this; } @Override public VirtualMachineImpl withoutVMAgent() { this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(false); return this; } @Override public VirtualMachineImpl withoutAutoUpdate() { this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false); return this; } @Override public
VirtualMachineImpl withTimeZone(String timeZone) { this.innerModel().osProfile().windowsConfiguration().withTimeZone(timeZone); return this; }
// NOTE(review): assumes winRM().listeners() is non-null once a WinRMConfiguration exists — confirm.
@Override public VirtualMachineImpl withWinRM(WinRMListener listener) { if (this.innerModel().osProfile().windowsConfiguration().winRM() == null) { WinRMConfiguration winRMConfiguration = new WinRMConfiguration(); this.innerModel().osProfile().windowsConfiguration().withWinRM(winRMConfiguration); } this.innerModel().osProfile().windowsConfiguration().winRM().listeners().add(listener); return this; } @Override public VirtualMachineImpl withRootPassword(String password) { this.innerModel().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineImpl withAdminPassword(String password) { this.innerModel().osProfile().withAdminPassword(password); return this; } @Override public VirtualMachineImpl withCustomData(String base64EncodedCustomData) { this.innerModel().osProfile().withCustomData(base64EncodedCustomData); return this; } @Override public VirtualMachineImpl withUserData(String base64EncodedUserData) { this.innerModel().withUserData(base64EncodedUserData); return this; } @Override public VirtualMachineImpl withComputerName(String computerName) { this.innerModel().osProfile().withComputerName(computerName); return this; } @Override public VirtualMachineImpl withSize(String sizeName) { this.innerModel().hardwareProfile().withVmSize(VirtualMachineSizeTypes.fromString(sizeName)); return this; } @Override public VirtualMachineImpl withSize(VirtualMachineSizeTypes size) { this.innerModel().hardwareProfile().withVmSize(size); return this; } @Override public VirtualMachineImpl withOSDiskCaching(CachingTypes cachingType) { this.innerModel().storageProfile().osDisk().withCaching(cachingType); return this; }
// Sets the OS-disk VHD location; no-op for managed disks and for custom-image based OS disks.
@Override public VirtualMachineImpl withOSDiskVhdLocation(String containerName, String vhdName) { if (isManagedDiskEnabled()) { return this; } StorageProfile storageProfile =
this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!this.isOSDiskFromImage(osDisk)) { return this; } if (this.isOsDiskFromCustomImage(storageProfile)) { return this; } if (this.isOSDiskFromPlatformImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); osVhd.withUri(temporaryBlobUrl(containerName, vhdName)); this.innerModel().storageProfile().osDisk().withVhd(osVhd); return this; } if (this.isOSDiskFromStoredImage(storageProfile)) { VirtualHardDisk osVhd = new VirtualHardDisk(); try { URL sourceCustomImageUrl = new URL(osDisk.image().uri()); URL destinationVhdUrl = new URL( sourceCustomImageUrl.getProtocol(), sourceCustomImageUrl.getHost(), "/" + containerName + "/" + vhdName); osVhd.withUri(destinationVhdUrl.toString()); } catch (MalformedURLException ex) { throw logger.logExceptionAsError(new RuntimeException(ex)); } this.innerModel().storageProfile().osDisk().withVhd(osVhd); } return this; } @Override public VirtualMachineImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) { if (this.innerModel().storageProfile().osDisk().managedDisk() == null) { this.innerModel().storageProfile().osDisk().withManagedDisk(new ManagedDiskParameters()); } this.innerModel().storageProfile().osDisk().managedDisk().withStorageAccountType(accountType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultCachingType(CachingTypes cachingType) { this.managedDataDisks.setDefaultCachingType(cachingType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) { this.managedDataDisks.setDefaultStorageAccountType(storageAccountType); return this; } @Override public VirtualMachineImpl withDataDiskDefaultDeleteOptions(DeleteOptions deleteOptions) { this.managedDataDisks.setDefaultDeleteOptions(diskDeleteOptionsFromDeleteOptions(deleteOptions)); return this; } @Override public VirtualMachineImpl withDataDiskDefaultDiskEncryptionSet( String 
diskEncryptionSetId) { this.managedDataDisks.setDefaultEncryptionSet(diskEncryptionSetId); return this; } @Override public VirtualMachineImpl withOSDiskEncryptionSettings(DiskEncryptionSettings settings) { this.innerModel().storageProfile().osDisk().withEncryptionSettings(settings); return this; } @Override public VirtualMachineImpl withOSDiskSizeInGB(int size) { this.innerModel().storageProfile().osDisk().withDiskSizeGB(size); return this; } @Override public VirtualMachineImpl withOSDiskName(String name) { this.innerModel().storageProfile().osDisk().withName(name); return this; } @Override public VirtualMachineImpl withOSDiskDeleteOptions(DeleteOptions deleteOptions) { this.innerModel().storageProfile().osDisk() .withDeleteOption(DiskDeleteOptionTypes.fromString(deleteOptions.toString())); return this; } @Override public VirtualMachineImpl withOSDiskDiskEncryptionSet(String diskEncryptionSetId) { if (this.innerModel().storageProfile().osDisk().managedDisk() == null) { this.innerModel().storageProfile().osDisk() .withManagedDisk(new ManagedDiskParameters()); } if (this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) { this.innerModel().storageProfile().osDisk().managedDisk() .withDiskEncryptionSet(new DiskEncryptionSetParameters()); } this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet().withId(diskEncryptionSetId); return this; } @Override public VirtualMachineImpl withEphemeralOSDisk() { if (this.innerModel().storageProfile().osDisk().diffDiskSettings() == null) { this.innerModel().storageProfile().osDisk().withDiffDiskSettings(new DiffDiskSettings()); } this.innerModel().storageProfile().osDisk().diffDiskSettings().withOption(DiffDiskOptions.LOCAL); withOSDiskCaching(CachingTypes.READ_ONLY); return this; } @Override public UnmanagedDataDiskImpl defineUnmanagedDataDisk(String name) { throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED); return 
UnmanagedDataDiskImpl.prepareDataDisk(name, this); // (tail of defineUnmanagedDataDisk, begun on the previous line)
}

// Attaches a new unmanaged data disk of the given size via an implicit VHD.
@Override
public VirtualMachineImpl withNewUnmanagedDataDisk(Integer sizeInGB) {
    throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
    return defineUnmanagedDataDisk(null).withNewVhd(sizeInGB).attach();
}

// Attaches an existing unmanaged data disk from a storage-account blob.
@Override
public VirtualMachineImpl withExistingUnmanagedDataDisk(
    String storageAccountName, String containerName, String vhdName) {
    throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
    return defineUnmanagedDataDisk(null).withExistingVhd(storageAccountName, containerName, vhdName).attach();
}

// Removes the unmanaged data disk with the given name.
// NOTE(review): removal uses the same index in unmanagedDataDisks and the inner
// dataDisks list -- relies on the two lists staying index-aligned (withUnmanagedDataDisk
// appends to both in lockstep).
@Override
public VirtualMachineImpl withoutUnmanagedDataDisk(String name) {
    int idx = -1;
    for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
        idx++;
        if (dataDisk.name().equalsIgnoreCase(name)) {
            this.unmanagedDataDisks.remove(idx);
            this.innerModel().storageProfile().dataDisks().remove(idx);
            break;
        }
    }
    return this;
}

// Removes the unmanaged data disk at the given LUN (same index-alignment caveat as above).
@Override
public VirtualMachineImpl withoutUnmanagedDataDisk(int lun) {
    int idx = -1;
    for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
        idx++;
        if (dataDisk.lun() == lun) {
            this.unmanagedDataDisks.remove(idx);
            this.innerModel().storageProfile().dataDisks().remove(idx);
            break;
        }
    }
    return this;
}

// Begins an update of an existing unmanaged data disk; throws when no disk matches.
@Override
public UnmanagedDataDiskImpl updateUnmanagedDataDisk(String name) {
    throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_NO_UNMANAGED_DISK_TO_UPDATE);
    for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
        if (dataDisk.name().equalsIgnoreCase(name)) {
            return (UnmanagedDataDiskImpl) dataDisk;
        }
    }
    throw logger.logExceptionAsError(new RuntimeException("A data disk with name '" + name + "' not found"));
}

// Attaches a managed data disk to be created from the given creatable
// (body continues on the next chunk line).
@Override
public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
this.managedDataDisks.newDisksToAttach.put(this.addDependency(creatable), new DataDisk().withLun(-1));
    // (Tail of withNewDataDisk(Creatable); LUN -1 presumably means "assign later" -- see setDataDisksDefaults.)
    return this;
}

// Data disk from a creatable with explicit LUN and caching type.
@Override
public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable, int lun, CachingTypes cachingType) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    this
        .managedDataDisks
        .newDisksToAttach
        .put(this.addDependency(creatable), new DataDisk().withLun(lun).withCaching(cachingType));
    return this;
}

// Implicitly creates and attaches a new empty managed disk of the given size.
@Override
public VirtualMachineImpl withNewDataDisk(int sizeInGB) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    this.managedDataDisks.implicitDisksToAssociate.add(new DataDisk().withLun(-1).withDiskSizeGB(sizeInGB));
    return this;
}

// Implicit new disk with explicit LUN and caching.
@Override
public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    this
        .managedDataDisks
        .implicitDisksToAssociate
        .add(new DataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType));
    return this;
}

// Implicit new disk with explicit LUN, caching and storage account type.
@Override
public VirtualMachineImpl withNewDataDisk(
    int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
    managedDiskParameters.withStorageAccountType(storageAccountType);
    this
        .managedDataDisks
        .implicitDisksToAssociate
        .add(
            new DataDisk()
                .withLun(lun)
                .withDiskSizeGB(sizeInGB)
                .withCaching(cachingType)
                .withManagedDisk(managedDiskParameters));
    return this;
}

// Implicit new disk with the full options bag (body continues on the next chunk line).
@Override
public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, VirtualMachineDiskOptions options) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    ManagedDiskParameters managedDiskParameters = null;
    if
(options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) {
        // (Tail of withNewDataDisk(sizeInGB, lun, options): only materialize
        // ManagedDiskParameters when the options actually carry managed-disk settings.)
        managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(options.storageAccountType());
        if (options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters.withDiskEncryptionSet(
                new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
        }
    }
    this
        .managedDataDisks
        .implicitDisksToAssociate
        .add(
            new DataDisk()
                .withLun(lun)
                .withDiskSizeGB(sizeInGB)
                .withCaching(options.cachingTypes())
                .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                .withManagedDisk(managedDiskParameters));
    return this;
}

// Attaches an existing managed disk (LUN -1: assigned later).
@Override
public VirtualMachineImpl withExistingDataDisk(Disk disk) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
    managedDiskParameters.withId(disk.id());
    this
        .managedDataDisks
        .existingDisksToAttach
        .add(new DataDisk().withLun(-1).withManagedDisk(managedDiskParameters));
    return this;
}

// Attaches an existing managed disk with explicit LUN and caching.
@Override
public VirtualMachineImpl withExistingDataDisk(Disk disk, int lun, CachingTypes cachingType) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
    managedDiskParameters.withId(disk.id());
    this
        .managedDataDisks
        .existingDisksToAttach
        .add(new DataDisk().withLun(lun).withManagedDisk(managedDiskParameters).withCaching(cachingType));
    return this;
}

// Attaches an existing managed disk, resizing it, with explicit LUN and caching
// (builder chain continues on the next chunk line).
@Override
public VirtualMachineImpl withExistingDataDisk(Disk disk, int newSizeInGB, int lun, CachingTypes cachingType) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
    managedDiskParameters.withId(disk.id());
    this
        .managedDataDisks
        .existingDisksToAttach
        .add(
            new DataDisk()
                .withLun(lun)
.withDiskSizeGB(newSizeInGB)
                .withManagedDisk(managedDiskParameters)
                .withCaching(cachingType));
    // (Tail of withExistingDataDisk(disk, newSizeInGB, lun, cachingType).)
    return this;
}

// Attaches an existing managed disk with the full options bag (caching, delete
// option, disk encryption set).
@Override
public VirtualMachineImpl withExistingDataDisk(
    Disk disk, int newSizeInGB, int lun, VirtualMachineDiskOptions options) {
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
    managedDiskParameters.withId(disk.id());
    if (options.isDiskEncryptionSetConfigured()) {
        managedDiskParameters.withDiskEncryptionSet(
            new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
    }
    this
        .managedDataDisks
        .existingDisksToAttach
        .add(
            new DataDisk()
                .withLun(lun)
                .withDiskSizeGB(newSizeInGB)
                .withCaching(options.cachingTypes())
                .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                .withManagedDisk(managedDiskParameters));
    return this;
}

// Data disk created from the VM image's disk at the given image LUN.
@Override
public VirtualMachineImpl withNewDataDiskFromImage(int imageLun) {
    this.managedDataDisks.newDisksFromImage.add(new DataDisk().withLun(imageLun));
    return this;
}

// Image data disk with explicit size and caching.
@Override
public VirtualMachineImpl withNewDataDiskFromImage(int imageLun, int newSizeInGB, CachingTypes cachingType) {
    this
        .managedDataDisks
        .newDisksFromImage
        .add(new DataDisk().withLun(imageLun).withDiskSizeGB(newSizeInGB).withCaching(cachingType));
    return this;
}

// Image data disk with explicit size, caching and storage account type.
@Override
public VirtualMachineImpl withNewDataDiskFromImage(
    int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) {
    ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
    managedDiskParameters.withStorageAccountType(storageAccountType);
    this
        .managedDataDisks
        .newDisksFromImage
        .add(
            new DataDisk()
                .withLun(imageLun)
                .withDiskSizeGB(newSizeInGB)
                .withManagedDisk(managedDiskParameters)
                .withCaching(cachingType));
    return this;
}

// Image data disk with the full options bag (body continues on the next chunk line).
@Override
public VirtualMachineImpl withNewDataDiskFromImage(
    int imageLun, int newSizeInGB, VirtualMachineDiskOptions options) {
throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    // (Tail of withNewDataDiskFromImage(imageLun, newSizeInGB, options).)
    ManagedDiskParameters managedDiskParameters = null;
    if (options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) {
        managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(options.storageAccountType());
        if (options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters.withDiskEncryptionSet(
                new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
        }
    }
    // NOTE(review): unlike the other withNewDataDiskFromImage overloads, which add to
    // newDisksFromImage, this overload adds to implicitDisksToAssociate and also calls
    // throwIfManagedDiskDisabled -- confirm this asymmetry is intentional and not a
    // copy-paste from the withNewDataDisk(options) overload.
    this
        .managedDataDisks
        .implicitDisksToAssociate
        .add(
            new DataDisk()
                .withLun(imageLun)
                .withDiskSizeGB(newSizeInGB)
                .withCaching(options.cachingTypes())
                .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                .withManagedDisk(managedDiskParameters));
    return this;
}

// Detaches the managed data disk at the given LUN; silently a no-op for
// unmanaged-disk VMs.
@Override
public VirtualMachineImpl withoutDataDisk(int lun) {
    if (!isManagedDiskEnabled()) {
        return this;
    }
    this.managedDataDisks.diskLunsToRemove.add(lun);
    return this;
}

// Registers a storage account creatable as a dependency; first registration wins.
@Override
public VirtualMachineImpl withNewStorageAccount(Creatable<StorageAccount> creatable) {
    if (this.creatableStorageAccountKey == null) {
        this.creatableStorageAccountKey = this.addDependency(creatable);
    }
    return this;
}

// Defines a new storage account in this VM's region and (new or existing) resource group.
@Override
public VirtualMachineImpl withNewStorageAccount(String name) {
    StorageAccount.DefinitionStages.WithGroup definitionWithGroup =
        this.storageManager.storageAccounts().define(name).withRegion(this.regionName());
    Creatable<StorageAccount> definitionAfterGroup;
    if (this.creatableGroup != null) {
        definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
    } else {
        definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
    }
    return withNewStorageAccount(definitionAfterGroup);
}

// Associates an already-existing storage account.
@Override
public VirtualMachineImpl withExistingStorageAccount(StorageAccount storageAccount) {
    this.existingStorageAccountToAssociate = storageAccount;
    return this;
}

// (Signature continues on the next chunk line.)
@Override
public VirtualMachineImpl
withNewAvailabilitySet(Creatable<AvailabilitySet> creatable) {
    // (Continuation: registers an availability-set creatable as a dependency; first one wins.)
    if (this.creatableAvailabilitySetKey == null) {
        this.creatableAvailabilitySetKey = this.addDependency(creatable);
    }
    return this;
}

// Associates an existing proximity placement group by id; clears any pending
// new-group request.
@Override
public VirtualMachineImpl withProximityPlacementGroup(String proximityPlacementGroupId) {
    this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId));
    newProximityPlacementGroupName = null;
    return this;
}

// Requests creation of a new proximity placement group during VM creation.
@Override
public VirtualMachineImpl withNewProximityPlacementGroup(
    String proximityPlacementGroupName, ProximityPlacementGroupType type) {
    this.newProximityPlacementGroupName = proximityPlacementGroupName;
    this.newProximityPlacementGroupType = type;
    this.innerModel().withProximityPlacementGroup(null);
    return this;
}

// Removes any proximity placement group association.
@Override
public VirtualMachineImpl withoutProximityPlacementGroup() {
    this.innerModel().withProximityPlacementGroup(null);
    return this;
}

// Defines a new availability set in this VM's region/group; SKU depends on disk
// kind (ALIGNED for managed disks, CLASSIC for unmanaged).
@Override
public VirtualMachineImpl withNewAvailabilitySet(String name) {
    AvailabilitySet.DefinitionStages.WithGroup definitionWithGroup =
        super.myManager.availabilitySets().define(name).withRegion(this.regionName());
    AvailabilitySet.DefinitionStages.WithSku definitionWithSku;
    if (this.creatableGroup != null) {
        definitionWithSku = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
    } else {
        definitionWithSku = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
    }
    Creatable<AvailabilitySet> creatable;
    if (isManagedDiskEnabled()) {
        creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.ALIGNED);
    } else {
        creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.CLASSIC);
    }
    return withNewAvailabilitySet(creatable);
}

// Associates an already-existing availability set.
@Override
public VirtualMachineImpl withExistingAvailabilitySet(AvailabilitySet availabilitySet) {
    this.existingAvailabilitySetToAssociate = availabilitySet;
    return this;
}

// Secondary NIC from a creatable (body continues on the next chunk line).
@Override
public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable) {
this.creatableSecondaryNetworkInterfaceKeys.add(this.addDependency(creatable));
    // (Tail of withNewSecondaryNetworkInterface(creatable).)
    return this;
}

// Secondary NIC from a creatable; an optional delete option is remembered per
// dependency key for later wiring.
@Override
public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable,
    DeleteOptions deleteOptions) {
    String key = this.addDependency(creatable);
    this.creatableSecondaryNetworkInterfaceKeys.add(key);
    if (deleteOptions != null) {
        this.secondaryNetworkInterfaceDeleteOptions.put(key, deleteOptions);
    }
    return this;
}

// Associates an already-existing secondary NIC.
@Override
public VirtualMachineImpl withExistingSecondaryNetworkInterface(NetworkInterface networkInterface) {
    this.existingSecondaryNetworkInterfacesToAssociate.add(networkInterface);
    return this;
}

// Starts definition of a new VM extension.
@Override
public VirtualMachineExtensionImpl defineNewExtension(String name) {
    return this.virtualMachineExtensions.define(name);
}

// Removes the first non-primary NIC whose resource name matches (case-insensitive).
@Override
public VirtualMachineImpl withoutSecondaryNetworkInterface(String name) {
    if (this.innerModel().networkProfile() != null
        && this.innerModel().networkProfile().networkInterfaces() != null) {
        int idx = -1;
        for (NetworkInterfaceReference nicReference : this.innerModel().networkProfile().networkInterfaces()) {
            idx++;
            if (!nicReference.primary()
                && name.equalsIgnoreCase(ResourceUtils.nameFromResourceId(nicReference.id()))) {
                this.innerModel().networkProfile().networkInterfaces().remove(idx);
                break;
            }
        }
    }
    return this;
}

// Starts an update of an existing VM extension.
@Override
public VirtualMachineExtensionImpl updateExtension(String name) {
    return this.virtualMachineExtensions.update(name);
}

// Removes a VM extension by name.
@Override
public VirtualMachineImpl withoutExtension(String name) {
    this.virtualMachineExtensions.remove(name);
    return this;
}

// Sets the marketplace purchase plan (publisher/product/name), replacing any
// existing plan.
@Override
public VirtualMachineImpl withPlan(PurchasePlan plan) {
    this.innerModel().withPlan(new Plan());
    this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name());
    return this;
}

// Purchase plan plus a promotion code.
@Override
public VirtualMachineImpl withPromotionalPlan(PurchasePlan plan, String promotionCode) {
    this.withPlan(plan);
    this.innerModel().plan().withPromotionCode(promotionCode);
    return this;
}

@Override
public VirtualMachineImpl withUnmanagedDisks() {
    // Opts this VM into unmanaged (VHD-based) disks.
    this.isUnmanagedDiskSelected = true;
    return this;
}

// Boot diagnostics backed by a platform-managed storage account (handler flag true).
@Override
public VirtualMachineImpl withBootDiagnosticsOnManagedStorageAccount() {
    this.bootDiagnosticsHandler.withBootDiagnostics(true);
    return this;
}

// Boot diagnostics; storage resolution is delegated to the handler (flag false).
@Override
public VirtualMachineImpl withBootDiagnostics() {
    this.bootDiagnosticsHandler.withBootDiagnostics(false);
    return this;
}

// Boot diagnostics backed by a to-be-created storage account.
@Override
public VirtualMachineImpl withBootDiagnostics(Creatable<StorageAccount> creatable) {
    this.bootDiagnosticsHandler.withBootDiagnostics(creatable);
    return this;
}

// Boot diagnostics backed by an explicit storage blob endpoint URI.
@Override
public VirtualMachineImpl withBootDiagnostics(String storageAccountBlobEndpointUri) {
    this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri);
    return this;
}

// Boot diagnostics backed by an existing storage account.
@Override
public VirtualMachineImpl withBootDiagnostics(StorageAccount storageAccount) {
    this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount);
    return this;
}

// Disables boot diagnostics.
@Override
public VirtualMachineImpl withoutBootDiagnostics() {
    this.bootDiagnosticsHandler.withoutBootDiagnostics();
    return this;
}

// Sets the VM priority (Regular/Low/Spot).
@Override
public VirtualMachineImpl withPriority(VirtualMachinePriorityTypes priority) {
    this.innerModel().withPriority(priority);
    return this;
}

// Convenience: LOW priority.
@Override
public VirtualMachineImpl withLowPriority() {
    this.withPriority(VirtualMachinePriorityTypes.LOW);
    return this;
}

// LOW priority plus an eviction policy.
@Override
public VirtualMachineImpl withLowPriority(VirtualMachineEvictionPolicyTypes policy) {
    this.withLowPriority();
    this.innerModel().withEvictionPolicy(policy);
    return this;
}

// Convenience: SPOT priority.
@Override
public VirtualMachineImpl withSpotPriority() {
    this.withPriority(VirtualMachinePriorityTypes.SPOT);
    return this;
}

// SPOT priority plus an eviction policy.
@Override
public VirtualMachineImpl withSpotPriority(VirtualMachineEvictionPolicyTypes policy) {
    this.withSpotPriority();
    this.innerModel().withEvictionPolicy(policy);
    return this;
}

// Maximum spot price; replaces any existing billing profile.
@Override
public VirtualMachineImpl withMaxPrice(Double maxPrice) {
    this.innerModel().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice));
    return this;
}

@Override
public
VirtualMachineImpl withSystemAssignedManagedServiceIdentity() {
    // (Continuation: enables the system-assigned (local) managed identity.)
    this.virtualMachineMsiHandler.withLocalManagedServiceIdentity();
    return this;
}

// Disables the system-assigned managed identity.
@Override
public VirtualMachineImpl withoutSystemAssignedManagedServiceIdentity() {
    this.virtualMachineMsiHandler.withoutLocalManagedServiceIdentity();
    return this;
}

// Grants the system-assigned identity a built-in role on the given resource.
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) {
    this.virtualMachineMsiHandler.withAccessTo(resourceId, role);
    return this;
}

// Grants the system-assigned identity a built-in role on the VM's resource group.
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole role) {
    this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(role);
    return this;
}

// Grants the system-assigned identity a role (by role-definition id) on the given resource.
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, String roleDefinitionId) {
    this.virtualMachineMsiHandler.withAccessTo(resourceId, roleDefinitionId);
    return this;
}

// Grants the system-assigned identity a role (by role-definition id) on the VM's resource group.
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(String roleDefinitionId) {
    this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId);
    return this;
}

// Associates a to-be-created user-assigned identity.
@Override
public VirtualMachineImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) {
    this.virtualMachineMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
    return this;
}

// Associates an existing user-assigned identity.
@Override
public VirtualMachineImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) {
    this.virtualMachineMsiHandler.withExistingExternalManagedServiceIdentity(identity);
    return this;
}

// Removes a user-assigned identity by id.
@Override
public VirtualMachineImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
    this.virtualMachineMsiHandler.withoutExternalManagedServiceIdentity(identityId);
    return this;
}

// Sets the license type string, passed through verbatim to the inner model.
@Override
public VirtualMachineImpl withLicenseType(String licenseType) {
    innerModel().withLicenseType(licenseType);
    return this;
}

// Enables hibernation (null-check continues on the next chunk line).
@Override
public VirtualMachineImpl enableHibernation() {
    if (this.innerModel().additionalCapabilities() ==
null) {
        // (Tail of enableHibernation: lazily create the capabilities holder.)
        this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities());
    }
    this.innerModel().additionalCapabilities().withHibernationEnabled(true);
    return this;
}

// Disables hibernation (creates the capabilities holder if absent).
@Override
public VirtualMachineImpl disableHibernation() {
    if (this.innerModel().additionalCapabilities() == null) {
        this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities());
    }
    this.innerModel().additionalCapabilities().withHibernationEnabled(false);
    return this;
}

// Decides whether this VM uses managed disks based on the OS disk's origin:
// custom image / attached managed -> true; stored image / attached unmanaged -> false;
// platform image honors the explicit unmanaged-disk selection; otherwise create mode
// defaults to managed and update mode infers from the absence of a VHD.
@Override
public boolean isManagedDiskEnabled() {
    if (isOsDiskFromCustomImage(this.innerModel().storageProfile())) {
        return true;
    }
    if (isOSDiskAttachedManaged(this.innerModel().storageProfile().osDisk())) {
        return true;
    }
    if (isOSDiskFromStoredImage(this.innerModel().storageProfile())) {
        return false;
    }
    if (isOSDiskAttachedUnmanaged(this.innerModel().storageProfile().osDisk())) {
        return false;
    }
    if (isOSDiskFromPlatformImage(this.innerModel().storageProfile())) {
        if (this.isUnmanagedDiskSelected) {
            return false;
        }
    }
    if (isInCreateMode()) {
        return true;
    } else {
        return this.innerModel().storageProfile().osDisk().vhd() == null;
    }
}

// Computer (host) name from the OS profile; null when no OS profile is set.
@Override
public String computerName() {
    if (innerModel().osProfile() == null) {
        return null;
    }
    return innerModel().osProfile().computerName();
}

// VM size from the hardware profile.
@Override
public VirtualMachineSizeTypes size() {
    return innerModel().hardwareProfile().vmSize();
}

// OS type: prefer the OS disk's explicit type, then infer from the OS profile's
// Linux/Windows configuration; null when undeterminable.
@Override
public OperatingSystemTypes osType() {
    if (innerModel().storageProfile().osDisk().osType() != null) {
        return innerModel().storageProfile().osDisk().osType();
    }
    if (innerModel().osProfile() != null) {
        if (innerModel().osProfile().linuxConfiguration() != null) {
            return OperatingSystemTypes.LINUX;
        }
        if (innerModel().osProfile().windowsConfiguration() != null) {
            return OperatingSystemTypes.WINDOWS;
        }
    }
    return null;
}

// URI of the unmanaged OS disk VHD; null for managed-disk VMs or when no VHD is set.
@Override
public String osUnmanagedDiskVhdUri() {
    if (isManagedDiskEnabled() || this.storageProfile().osDisk().vhd() == null) {
        return null;
    }
    return innerModel().storageProfile().osDisk().vhd().uri();
}

@Override
public
CachingTypes osDiskCachingType() {
    // (Continuation: OS disk caching type.)
    return innerModel().storageProfile().osDisk().caching();
}

// OS disk size in GB (0 when unset, via toPrimitiveInt).
@Override
public int osDiskSize() {
    return ResourceManagerUtils.toPrimitiveInt(innerModel().storageProfile().osDisk().diskSizeGB());
}

// Storage account type of the managed OS disk; null for unmanaged disks or when unset.
@Override
public StorageAccountTypes osDiskStorageAccountType() {
    if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) {
        return null;
    }
    return this.storageProfile().osDisk().managedDisk().storageAccountType();
}

// Resource id of the managed OS disk; null for unmanaged-disk VMs.
@Override
public String osDiskId() {
    if (!isManagedDiskEnabled()) {
        return null;
    }
    return this.storageProfile().osDisk().managedDisk().id();
}

// Delete option of the managed OS disk mapped to the fluent DeleteOptions; null when unset.
@Override
public DeleteOptions osDiskDeleteOptions() {
    if (!isManagedDiskEnabled() || this.storageProfile().osDisk().deleteOption() == null) {
        return null;
    }
    return DeleteOptions.fromString(this.storageProfile().osDisk().deleteOption().toString());
}

// Disk-encryption-set id of the managed OS disk; null when not configured.
@Override
public String osDiskDiskEncryptionSetId() {
    if (!isManagedDiskEnabled()
        || this.storageProfile().osDisk().managedDisk() == null
        || this.storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) {
        return null;
    }
    return this.storageProfile().osDisk().managedDisk().diskEncryptionSet().id();
}

// NOTE(review): this tests diffDiskSettings().placement(), while withEphemeralOSDisk()
// sets only diffDiskSettings().option(); placement is presumably populated by the
// service on read-back -- confirm before relying on this for locally-built models.
@Override
public boolean isOSDiskEphemeral() {
    return this.storageProfile().osDisk().diffDiskSettings() != null
        && this.storageProfile().osDisk().diffDiskSettings().placement() != null;
}

// Unmanaged data disks keyed by LUN (empty map for managed-disk VMs).
@Override
public Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks() {
    Map<Integer, VirtualMachineUnmanagedDataDisk> dataDisks = new HashMap<>();
    if (!isManagedDiskEnabled()) {
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            dataDisks.put(dataDisk.lun(), dataDisk);
        }
    }
    return Collections.unmodifiableMap(dataDisks);
}

// Managed data disks keyed by LUN (body continues on the next chunk line).
@Override
public Map<Integer, VirtualMachineDataDisk> dataDisks() {
    Map<Integer, VirtualMachineDataDisk> dataDisks = new HashMap<>();
    if (isManagedDiskEnabled()) {
        List<DataDisk> innerDataDisks = this.innerModel().storageProfile().dataDisks();
        if (innerDataDisks != null)
{
            // (Tail of dataDisks(): wrap each inner DataDisk in the fluent type.)
            for (DataDisk innerDataDisk : innerDataDisks) {
                dataDisks.put(innerDataDisk.lun(), new VirtualMachineDataDiskImpl(innerDataDisk));
            }
        }
    }
    return Collections.unmodifiableMap(dataDisks);
}

// Blocking fetch of the primary NIC.
@Override
public NetworkInterface getPrimaryNetworkInterface() {
    return this.getPrimaryNetworkInterfaceAsync().block();
}

// Async fetch of the primary NIC by its id.
@Override
public Mono<NetworkInterface> getPrimaryNetworkInterfaceAsync() {
    return this.networkManager.networkInterfaces().getByIdAsync(primaryNetworkInterfaceId());
}

// Blocking fetch of the primary NIC's public IP address.
@Override
public PublicIpAddress getPrimaryPublicIPAddress() {
    return this.getPrimaryNetworkInterface().primaryIPConfiguration().getPublicIpAddress();
}

// Id of the primary NIC's public IP address.
@Override
public String getPrimaryPublicIPAddressId() {
    return this.getPrimaryNetworkInterface().primaryIPConfiguration().publicIpAddressId();
}

// Ids of all attached NICs.
@Override
public List<String> networkInterfaceIds() {
    List<String> nicIds = new ArrayList<>();
    for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) {
        nicIds.add(nicRef.id());
    }
    return nicIds;
}

// Id of the primary NIC: the sole NIC when only one exists, else the one flagged
// primary, falling back to the first; null when the VM has no NICs.
@Override
public String primaryNetworkInterfaceId() {
    final List<NetworkInterfaceReference> nicRefs = this.innerModel().networkProfile().networkInterfaces();
    String primaryNicRefId = null;
    if (nicRefs.size() == 1) {
        primaryNicRefId = nicRefs.get(0).id();
    } else if (nicRefs.size() == 0) {
        primaryNicRefId = null;
    } else {
        for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) {
            if (nicRef.primary() != null && nicRef.primary()) {
                primaryNicRefId = nicRef.id();
                break;
            }
        }
        if (primaryNicRefId == null) {
            primaryNicRefId = nicRefs.get(0).id();
        }
    }
    return primaryNicRefId;
}

// Availability set id, or null when none is associated.
@Override
public String availabilitySetId() {
    if (innerModel().availabilitySet() != null) {
        return innerModel().availabilitySet().id();
    }
    return null;
}

// Scale set id, or null when the VM is not in a scale set.
@Override
public String virtualMachineScaleSetId() {
    if (innerModel().virtualMachineScaleSet() != null) {
        return innerModel().virtualMachineScaleSet().id();
    }
    return null;
}

@Override
public String provisioningState() {
    return
innerModel().provisioningState(); // (tail of provisioningState(), begun on the previous line)
}

// License type string as stored on the inner model.
@Override
public String licenseType() {
    return innerModel().licenseType();
}

// Resolves the associated proximity placement group via a synchronous service call;
// null when none is associated or the group no longer exists.
@Override
public ProximityPlacementGroup proximityPlacementGroup() {
    if (innerModel().proximityPlacementGroup() == null) {
        return null;
    } else {
        ResourceId id = ResourceId.fromString(innerModel().proximityPlacementGroup().id());
        ProximityPlacementGroupInner plgInner =
            manager()
                .serviceClient()
                .getProximityPlacementGroups()
                .getByResourceGroup(id.resourceGroupName(), id.name());
        if (plgInner == null) {
            return null;
        } else {
            return new ProximityPlacementGroupImpl(plgInner);
        }
    }
}

// Async listing of this VM's extensions.
@Override
public Mono<List<VirtualMachineExtension>> listExtensionsAsync() {
    return this.virtualMachineExtensions.listAsync();
}

// Extensions keyed by name.
@Override
public Map<String, VirtualMachineExtension> listExtensions() {
    return this.virtualMachineExtensions.asMap();
}

// Simple pass-through accessors over the inner model.
@Override
public Plan plan() {
    return innerModel().plan();
}

@Override
public StorageProfile storageProfile() {
    return innerModel().storageProfile();
}

@Override
public OSProfile osProfile() {
    return innerModel().osProfile();
}

@Override
public DiagnosticsProfile diagnosticsProfile() {
    return innerModel().diagnosticsProfile();
}

@Override
public String vmId() {
    return innerModel().vmId();
}

// Instance view, fetched lazily on first access.
@Override
public VirtualMachineInstanceView instanceView() {
    if (this.virtualMachineInstanceView == null) {
        this.refreshInstanceView();
    }
    return this.virtualMachineInstanceView;
}

// Availability zones as an unmodifiable set (empty when none are set).
@Override
public Set<AvailabilityZoneId> availabilityZones() {
    Set<AvailabilityZoneId> zones = new HashSet<>();
    if (this.innerModel().zones() != null) {
        for (String zone : this.innerModel().zones()) {
            zones.add(AvailabilityZoneId.fromString(zone));
        }
    }
    return Collections.unmodifiableSet(zones);
}

// Power state derived from the (possibly freshly fetched) instance view.
@Override
public PowerState powerState() {
    return PowerState.fromInstanceView(this.instanceView());
}

@Override
public boolean isBootDiagnosticsEnabled() {
    return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled();
}

@Override
public String bootDiagnosticsStorageUri() {
return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri(); // (tail of bootDiagnosticsStorageUri())
}

// True when any managed-identity type other than NONE is set.
@Override
public boolean isManagedServiceIdentityEnabled() {
    ResourceIdentityType type = this.managedServiceIdentityType();
    return type != null && !type.equals(ResourceIdentityType.NONE);
}

// Tenant id of the system-assigned identity; null when no identity block exists.
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().tenantId();
    }
    return null;
}

// Principal id of the system-assigned identity; null when no identity block exists.
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().principalId();
    }
    return null;
}

// Identity type; null when no identity block exists.
@Override
public ResourceIdentityType managedServiceIdentityType() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().type();
    }
    return null;
}

// Ids of user-assigned identities as an unmodifiable snapshot (empty set when none).
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
    if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) {
        return Collections
            .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
    }
    return Collections.unmodifiableSet(new HashSet<String>());
}

@Override
public BillingProfile billingProfile() {
    return this.innerModel().billingProfile();
}

// True only when additionalCapabilities explicitly enables hibernation.
@Override
public boolean isHibernationEnabled() {
    return this.innerModel().additionalCapabilities() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().additionalCapabilities().hibernationEnabled());
}

// Security type from the security profile; null when no profile is present.
@Override
public SecurityTypes securityType() {
    SecurityProfile securityProfile = this.innerModel().securityProfile();
    if (securityProfile == null) {
        return null;
    }
    return securityProfile.securityType();
}

// Secure boot: requires a security type, UEFI settings, and the explicit flag.
@Override
public boolean isSecureBootEnabled() {
    return securityType() != null
        && this.innerModel().securityProfile().uefiSettings() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().secureBootEnabled());
}

// vTPM: same gating as secure boot (expression continues on the next chunk line).
@Override
public boolean isVTpmEnabled() {
    return
securityType() != null
        && this.innerModel().securityProfile().uefiSettings() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().vTpmEnabled());
    // (Tail of isVTpmEnabled().)
}

@Override
public OffsetDateTime timeCreated() {
    return innerModel().timeCreated();
}

// Delete option recorded on the primary NIC reference.
@Override
public DeleteOptions primaryNetworkInterfaceDeleteOptions() {
    String nicId = primaryNetworkInterfaceId();
    return networkInterfaceDeleteOptions(nicId);
}

// Delete option of the NIC with the given id; null when the id is empty, the
// network profile is missing, or no NIC matches (case-insensitive).
@Override
public DeleteOptions networkInterfaceDeleteOptions(String networkInterfaceId) {
    if (CoreUtils.isNullOrEmpty(networkInterfaceId)
        || this.innerModel().networkProfile() == null
        || this.innerModel().networkProfile().networkInterfaces() == null) {
        return null;
    }
    return this.innerModel().networkProfile()
        .networkInterfaces()
        .stream()
        .filter(nic -> networkInterfaceId.equalsIgnoreCase(nic.id()))
        .findAny()
        .map(NetworkInterfaceReference::deleteOption)
        .orElse(null);
}

@Override
public VirtualMachinePriorityTypes priority() {
    return this.innerModel().priority();
}

@Override
public VirtualMachineEvictionPolicyTypes evictionPolicy() {
    return this.innerModel().evictionPolicy();
}

@Override
public String userData() {
    return this.innerModel().userData();
}

// Pre-create hook: when unmanaged OS/data disks will need a storage account and
// none was supplied, implicitly defines one with a random "stg…" name in this VM's
// region and (new or existing) resource group. (Body continues on the next chunk line.)
@Override
public void beforeGroupCreateOrUpdate() {
    if (creatableStorageAccountKey == null && existingStorageAccountToAssociate == null) {
        if (osDiskRequiresImplicitStorageAccountCreation() || dataDisksRequiresImplicitStorageAccountCreation()) {
            Creatable<StorageAccount> storageAccountCreatable = null;
            if (this.creatableGroup != null) {
                storageAccountCreatable =
                    this
                        .storageManager
                        .storageAccounts()
                        .define(this.namer.getRandomName("stg", 24).replace("-", ""))
                        .withRegion(this.regionName())
                        .withNewResourceGroup(this.creatableGroup);
            } else {
                storageAccountCreatable =
                    this
                        .storageManager
                        .storageAccounts()
                        .define(this.namer.getRandomName("stg", 24).replace("-", ""))
                        .withRegion(this.regionName())
                        .withExistingResourceGroup(this.resourceGroupName());
            }
this.creatableStorageAccountKey = this.addDependency(storageAccountCreatable);
            // (Tail of beforeGroupCreateOrUpdate: register the implicit storage account.)
        }
    }
    this.bootDiagnosticsHandler.prepare();
}

// Creates the VM: runs local preparation, issues createOrUpdate, then resets this
// wrapper's cached state from the returned inner model.
@Override
public Mono<VirtualMachine> createResourceAsync() {
    return prepareCreateResourceAsync()
        .flatMap(
            virtualMachine ->
                this
                    .manager()
                    .serviceClient()
                    .getVirtualMachines()
                    .createOrUpdateAsync(resourceGroupName(), vmName, innerModel())
                    .map(
                        virtualMachineInner -> {
                            reset(virtualMachineInner);
                            return this;
                        }));
}

// Applies all client-side defaults (OS disk, OS profile, hardware, data disks,
// unmanaged storage, diagnostics, network), then creates any requested proximity
// placement group before finalizing availability and identity settings.
private Mono<VirtualMachine> prepareCreateResourceAsync() {
    setOSDiskDefaults();
    setOSProfileDefaults();
    setHardwareProfileDefaults();
    if (isManagedDiskEnabled()) {
        managedDataDisks.setDataDisksDefaults();
    } else {
        UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName);
    }
    this.handleUnManagedOSAndDataDisksStorageSettings();
    this.bootDiagnosticsHandler.handleDiagnosticsSettings();
    this.handleNetworkSettings();
    return this
        .createNewProximityPlacementGroupAsync()
        .map(
            virtualMachine -> {
                this.handleAvailabilitySettings();
                this.virtualMachineMsiHandler.processCreatedExternalIdentities();
                this.virtualMachineMsiHandler.handleExternalIdentities();
                return virtualMachine;
            });
}

// Updates the VM: re-applies defaults, builds an update payload, and only calls the
// service when the payload actually differs from current state
// (service call continues on the next chunk line).
@Override
public Mono<VirtualMachine> updateResourceAsync() {
    if (isManagedDiskEnabled()) {
        managedDataDisks.setDataDisksDefaults();
    } else {
        UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName);
    }
    this.handleUnManagedOSAndDataDisksStorageSettings();
    this.bootDiagnosticsHandler.handleDiagnosticsSettings();
    this.handleNetworkSettings();
    this.handleAvailabilitySettings();
    this.virtualMachineMsiHandler.processCreatedExternalIdentities();
    VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();
    this.copyInnerToUpdateParameter(updateParameter);
    this.virtualMachineMsiHandler.handleExternalIdentities(updateParameter);
    final boolean vmModified = this.isVirtualMachineModifiedDuringUpdate(updateParameter);
    if (vmModified) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
.updateAsync(resourceGroupName(), vmName, updateParameter) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; }); } else { return Mono.just(this); } } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { this.virtualMachineExtensions.clear(); if (isGroupFaulted) { return Mono.empty(); } else { return this.refreshAsync().then(); } } VirtualMachineImpl withExtension(VirtualMachineExtensionImpl extension) { this.virtualMachineExtensions.addExtension(extension); return this; } private void reset(VirtualMachineInner inner) { this.setInner(inner); clearCachedRelatedResources(); initializeDataDisks(); virtualMachineMsiHandler.clear(); creatableSecondaryNetworkInterfaceKeys.clear(); existingSecondaryNetworkInterfacesToAssociate.clear(); secondaryNetworkInterfaceDeleteOptions.clear(); primaryNetworkInterfaceDeleteOptions = null; } VirtualMachineImpl withUnmanagedDataDisk(UnmanagedDataDiskImpl dataDisk) { this.innerModel().storageProfile().dataDisks().add(dataDisk.innerModel()); this.unmanagedDataDisks.add(dataDisk); return this; } @Override public VirtualMachineImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (isInCreateMode()) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<String>()); } this.innerModel().zones().add(zoneId.toString()); if (this.implicitPipCreatable != null) { this.implicitPipCreatable .withAvailabilityZone(zoneId) .withSku(PublicIPSkuType.STANDARD) .withStaticIP(); } } return this; } @Override public VirtualMachineImpl withOsDiskDeleteOptions(DeleteOptions deleteOptions) { if (deleteOptions == null || this.innerModel().storageProfile() == null || this.innerModel().storageProfile().osDisk() == null) { return null; } this.innerModel().storageProfile().osDisk().withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); return this; } @Override public VirtualMachineImpl withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions deleteOptions) { 
this.primaryNetworkInterfaceDeleteOptions = deleteOptions; return this; } @Override public VirtualMachineImpl withNetworkInterfacesDeleteOptions(DeleteOptions deleteOptions, String... nicIds) { if (nicIds == null || nicIds.length == 0) { throw new IllegalArgumentException("No nicIds specified for `withNetworkInterfacesDeleteOptions`"); } if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { Set<String> nicIdSet = Arrays.stream(nicIds).map(nicId -> nicId.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()); this.innerModel().networkProfile().networkInterfaces().forEach( nic -> { if (nicIdSet.contains(nic.id().toLowerCase(Locale.ROOT))) { nic.withDeleteOption(deleteOptions); } } ); } return this; } @Override public VirtualMachineImpl withDataDisksDeleteOptions(DeleteOptions deleteOptions, Integer... luns) { if (luns == null || luns.length == 0) { throw new IllegalArgumentException("No luns specified for `withDataDisksDeleteOptions`"); } Set<Integer> lunSet = Arrays.stream(luns).filter(Objects::nonNull).collect(Collectors.toSet()); if (lunSet.isEmpty()) { throw new IllegalArgumentException("No non-null luns specified for `withDataDisksDeleteOptions`"); } if (this.innerModel().storageProfile() != null && this.innerModel().storageProfile().dataDisks() != null) { this.innerModel().storageProfile().dataDisks().forEach( dataDisk -> { if (lunSet.contains(dataDisk.lun())) { dataDisk.withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); } } ); } return this; } AzureEnvironment environment() { return manager().environment(); } private void setOSDiskDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (isOSDiskFromImage(osDisk)) { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() == null) { osDisk.withManagedDisk(new ManagedDiskParameters()); } if (osDisk.managedDisk().storageAccountType() == 
null) { osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS); } osDisk.withVhd(null); } else { if (isOSDiskFromPlatformImage(storageProfile) || isOSDiskFromStoredImage(storageProfile)) { if (osDisk.vhd() == null) { String osDiskVhdContainerName = "vhds"; String osDiskVhdName = this.vmName + "-os-disk-" + UUID.randomUUID().toString() + ".vhd"; withOSDiskVhdLocation(osDiskVhdContainerName, osDiskVhdName); } osDisk.withManagedDisk(null); } if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } else { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() != null) { osDisk.managedDisk().withStorageAccountType(null); } osDisk.withVhd(null); } else { osDisk.withManagedDisk(null); if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } if (osDisk.caching() == null) { withOSDiskCaching(CachingTypes.READ_WRITE); } } private void setOSProfileDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!removeOsProfile && isOSDiskFromImage(osDisk)) { if (osDisk.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration() == null) { osProfile.withLinuxConfiguration(new LinuxConfiguration()); } this .innerModel() .osProfile() .linuxConfiguration() .withDisablePasswordAuthentication(osProfile.adminPassword() == null); } if (this.innerModel().osProfile().computerName() == null) { if (vmName.matches("[0-9]+")) { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } else if (vmName.length() <= 15) { this.innerModel().osProfile().withComputerName(vmName); } else { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } } } else { this.innerModel().withOsProfile(null); } } private void setHardwareProfileDefaults() { if (!isInCreateMode()) { return; } HardwareProfile 
hardwareProfile = this.innerModel().hardwareProfile(); if (hardwareProfile.vmSize() == null) { hardwareProfile.withVmSize(VirtualMachineSizeTypes.BASIC_A0); } } /** Prepare virtual machine disks profile (StorageProfile). */ private void handleUnManagedOSAndDataDisksStorageSettings() { if (isManagedDiskEnabled()) { return; } StorageAccount storageAccount = null; if (this.creatableStorageAccountKey != null) { storageAccount = this.taskResult(this.creatableStorageAccountKey); } else if (this.existingStorageAccountToAssociate != null) { storageAccount = this.existingStorageAccountToAssociate; } if (isInCreateMode()) { if (storageAccount != null) { if (isOSDiskFromPlatformImage(innerModel().storageProfile())) { String uri = innerModel() .storageProfile() .osDisk() .vhd() .uri() .replaceFirst("\\{storage-base-url}", storageAccount.endPoints().primary().blob()); innerModel().storageProfile().osDisk().vhd().withUri(uri); } UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } } else { if (storageAccount != null) { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } else { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, vmName); } } } private Mono<VirtualMachineImpl> createNewProximityPlacementGroupAsync() { if (isInCreateMode()) { if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) { ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner(); plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType); plgInner.withLocation(this.innerModel().location()); return this .manager() .serviceClient() .getProximityPlacementGroups() .createOrUpdateAsync(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner) .map( createdPlgInner -> { this .innerModel() .withProximityPlacementGroup(new SubResource().withId(createdPlgInner.id())); return this; }); } } return Mono.just(this); } private void 
handleNetworkSettings() { if (isInCreateMode()) { NetworkInterface primaryNetworkInterface = null; if (this.creatablePrimaryNetworkInterfaceKey != null) { primaryNetworkInterface = this.taskResult(this.creatablePrimaryNetworkInterfaceKey); } else if (this.existingPrimaryNetworkInterfaceToAssociate != null) { primaryNetworkInterface = this.existingPrimaryNetworkInterfaceToAssociate; } if (primaryNetworkInterface != null) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(true); nicReference.withId(primaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } if (this.primaryNetworkInterfaceDeleteOptions != null) { String primaryNetworkInterfaceId = primaryNetworkInterfaceId(); if (primaryNetworkInterfaceId != null) { this.innerModel().networkProfile().networkInterfaces().stream() .filter(nic -> primaryNetworkInterfaceId.equals(nic.id())) .forEach(nic -> nic.withDeleteOption(this.primaryNetworkInterfaceDeleteOptions)); } } for (String creatableSecondaryNetworkInterfaceKey : this.creatableSecondaryNetworkInterfaceKeys) { NetworkInterface secondaryNetworkInterface = this.taskResult(creatableSecondaryNetworkInterfaceKey); NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); if (secondaryNetworkInterfaceDeleteOptions.containsKey(creatableSecondaryNetworkInterfaceKey)) { DeleteOptions deleteOptions = secondaryNetworkInterfaceDeleteOptions.get(creatableSecondaryNetworkInterfaceKey); nicReference.withDeleteOption(deleteOptions); } this.innerModel().networkProfile().networkInterfaces().add(nicReference); } for (NetworkInterface secondaryNetworkInterface : this.existingSecondaryNetworkInterfacesToAssociate) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); 
this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } private void handleAvailabilitySettings() { if (!isInCreateMode()) { return; } AvailabilitySet availabilitySet = null; if (this.creatableAvailabilitySetKey != null) { availabilitySet = this.taskResult(this.creatableAvailabilitySetKey); } else if (this.existingAvailabilitySetToAssociate != null) { availabilitySet = this.existingAvailabilitySetToAssociate; } if (availabilitySet != null) { if (this.innerModel().availabilitySet() == null) { this.innerModel().withAvailabilitySet(new SubResource()); } this.innerModel().availabilitySet().withId(availabilitySet.id()); } } private boolean osDiskRequiresImplicitStorageAccountCreation() { if (isManagedDiskEnabled()) { return false; } if (this.creatableStorageAccountKey != null || this.existingStorageAccountToAssociate != null || !isInCreateMode()) { return false; } return isOSDiskFromPlatformImage(this.innerModel().storageProfile()); } private boolean dataDisksRequiresImplicitStorageAccountCreation() { if (isManagedDiskEnabled()) { return false; } if (this.creatableStorageAccountKey != null || this.existingStorageAccountToAssociate != null || this.unmanagedDataDisks.size() == 0) { return false; } boolean hasEmptyVhd = false; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.creationMethod() == DiskCreateOptionTypes.EMPTY || dataDisk.creationMethod() == DiskCreateOptionTypes.FROM_IMAGE) { if (dataDisk.innerModel().vhd() == null) { hasEmptyVhd = true; break; } } } if (isInCreateMode()) { return hasEmptyVhd; } if (hasEmptyVhd) { for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.creationMethod() == DiskCreateOptionTypes.ATTACH && dataDisk.innerModel().vhd() != null) { return false; } } return true; } return false; } /** * Checks whether the OS disk is directly attached to a unmanaged VHD. 
* * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a unmanaged VHD, false otherwise */ private boolean isOSDiskAttachedUnmanaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.vhd() != null && osDisk.vhd().uri() != null; } /** * Checks whether the OS disk is directly attached to a managed disk. * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a managed disk, false otherwise */ private boolean isOSDiskAttachedManaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.managedDisk() != null && osDisk.managedDisk().id() != null; } /** * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]). * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is configured to use image from PIR or custom image */ private boolean isOSDiskFromImage(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE; } /** * Checks whether the OS disk is based on an platform image (image in PIR). * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on platform image. */ private boolean isOSDiskFromPlatformImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.publisher() != null && imageReference.offer() != null && imageReference.sku() != null && imageReference.version() != null; } /** * Checks whether the OS disk is based on a CustomImage. * * <p>A custom image is represented by {@link VirtualMachineCustomImage}. * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on custom image. 
     */
    private boolean isOsDiskFromCustomImage(StorageProfile storageProfile) {
        ImageReference imageReference = storageProfile.imageReference();
        // A custom image is referenced by resource id (platform images instead carry
        // publisher/offer/sku/version — see isOSDiskFromPlatformImage).
        return isOSDiskFromImage(storageProfile.osDisk())
            && imageReference != null
            && imageReference.id() != null;
    }

    /**
     * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature').
     *
     * <p>A stored image is created by capturing a {@link VirtualMachine}.
     *
     * @param storageProfile the storage profile
     * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature')
     */
    private boolean isOSDiskFromStoredImage(StorageProfile storageProfile) {
        OSDisk osDisk = storageProfile.osDisk();
        return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null;
    }

    /**
     * Builds a blob URL containing the literal "{storage-base-url}" placeholder, which is
     * substituted with the storage account's primary blob endpoint later (see
     * handleUnManagedOSAndDataDisksStorageSettings).
     *
     * @param containerName the blob container name
     * @param blobName the blob name
     * @return the templated blob URL
     */
    private String temporaryBlobUrl(String containerName, String blobName) {
        return "{storage-base-url}" + containerName + "/" + blobName;
    }

    /**
     * Starts a NIC definition in this VM's region and resource group (creatable or existing),
     * attached to a new virtual network with a dynamic private IP.
     *
     * @param name the name for the new network interface
     * @return the next stage of the NIC definition
     */
    private NetworkInterface.DefinitionStages.WithPrimaryPublicIPAddress prepareNetworkInterface(String name) {
        NetworkInterface.DefinitionStages.WithGroup definitionWithGroup =
            this.networkManager.networkInterfaces().define(name).withRegion(this.regionName());
        NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionWithNetwork;
        if (this.creatableGroup != null) {
            definitionWithNetwork = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
        } else {
            definitionWithNetwork = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
        }
        return definitionWithNetwork.withNewPrimaryNetwork("vnet" + name).withPrimaryPrivateIPAddressDynamic();
    }

    /**
     * Resets the data-disk bookkeeping from the current inner model: ensures the inner data-disk
     * list exists, clears pending managed-disk operations, and (for unmanaged VMs) rebuilds the
     * unmanaged data disk wrappers from the storage profile.
     */
    private void initializeDataDisks() {
        if (this.innerModel().storageProfile().dataDisks() == null) {
            this.innerModel().storageProfile().withDataDisks(new ArrayList<>());
        }
        this.isUnmanagedDiskSelected = false;
        this.managedDataDisks.clear();
        this.unmanagedDataDisks = new ArrayList<>();
        if (!isManagedDiskEnabled()) {
            for (DataDisk dataDiskInner : this.storageProfile().dataDisks()) {
this.unmanagedDataDisks.add(new UnmanagedDataDiskImpl(dataDiskInner, this)); } } } private NetworkInterface.DefinitionStages.WithPrimaryNetwork preparePrimaryNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionAfterGroup; } private void clearCachedRelatedResources() { this.virtualMachineInstanceView = null; } private void throwIfManagedDiskEnabled(String message) { if (this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private void throwIfManagedDiskDisabled(String message) { if (!this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private boolean isInUpdateMode() { return !this.isInCreateMode(); } private DiskDeleteOptionTypes diskDeleteOptionsFromDeleteOptions(DeleteOptions deleteOptions) { return deleteOptions == null ? 
            null : DiskDeleteOptionTypes.fromString(deleteOptions.toString());
    }

    /**
     * Detects whether the update parameter differs from the snapshot taken when update() began,
     * by comparing the JSON serializations of the two objects.
     *
     * @param updateParameter the update parameter about to be sent to the service
     * @return true if a change was detected (or detection was impossible), false if identical
     */
    boolean isVirtualMachineModifiedDuringUpdate(VirtualMachineUpdateInner updateParameter) {
        if (updateParameterSnapshotOnUpdate == null || updateParameter == null) {
            // No snapshot to compare against — conservatively report "modified" so the
            // update call is made.
            return true;
        } else {
            try {
                String jsonStrSnapshot =
                    SERIALIZER_ADAPTER.serialize(updateParameterSnapshotOnUpdate, SerializerEncoding.JSON);
                String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON);
                return !jsonStr.equals(jsonStrSnapshot);
            } catch (IOException e) {
                // Serialization failure: assume modified rather than silently skipping the update.
                return true;
            }
        }
    }

    /**
     * Produces a deep copy of the updatable portion of the inner model (used as the snapshot for
     * isVirtualMachineModifiedDuringUpdate) via a JSON serialize/deserialize round trip.
     *
     * @return the deep-copied update parameter, or null if serialization failed
     */
    VirtualMachineUpdateInner deepCopyInnerToUpdateParameter() {
        VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();
        copyInnerToUpdateParameter(updateParameter);
        try {
            // JSON round trip yields a copy detached from the shared sub-objects assigned by
            // copyInnerToUpdateParameter (which copies references, not values).
            String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON);
            updateParameter =
                SERIALIZER_ADAPTER.deserialize(jsonStr, VirtualMachineUpdateInner.class, SerializerEncoding.JSON);
        } catch (IOException e) {
            return null;
        }
        if (this.innerModel().identity() != null) {
            // Only the identity type is copied into the snapshot; NOTE(review): presumably the
            // remaining identity fields are managed by VirtualMachineMsiHandler — confirm.
            VirtualMachineIdentity identity = new VirtualMachineIdentity();
            identity.withType(this.innerModel().identity().type());
            updateParameter.withIdentity(identity);
        }
        return updateParameter;
    }

    /**
     * Copies the updatable fields of the inner model into the given update parameter
     * (by reference — see deepCopyInnerToUpdateParameter for a detached copy).
     *
     * @param updateParameter the target update parameter to populate
     */
    private void copyInnerToUpdateParameter(VirtualMachineUpdateInner updateParameter) {
        updateParameter.withHardwareProfile(this.innerModel().hardwareProfile());
        updateParameter.withStorageProfile(this.innerModel().storageProfile());
        updateParameter.withOsProfile(this.innerModel().osProfile());
        updateParameter.withNetworkProfile(this.innerModel().networkProfile());
        updateParameter.withDiagnosticsProfile(this.innerModel().diagnosticsProfile());
        updateParameter.withBillingProfile(this.innerModel().billingProfile());
        updateParameter.withSecurityProfile(this.innerModel().securityProfile());
        updateParameter.withAdditionalCapabilities(this.innerModel().additionalCapabilities());
        updateParameter.withAvailabilitySet(this.innerModel().availabilitySet());
updateParameter.withLicenseType(this.innerModel().licenseType()); updateParameter.withZones(this.innerModel().zones()); updateParameter.withTags(this.innerModel().tags()); updateParameter.withProximityPlacementGroup(this.innerModel().proximityPlacementGroup()); updateParameter.withPriority(this.innerModel().priority()); updateParameter.withEvictionPolicy(this.innerModel().evictionPolicy()); updateParameter.withUserData(this.innerModel().userData()); } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } @Override public VirtualMachineImpl withPlacement(DiffDiskPlacement placement) { if (placement != null) { this.innerModel().storageProfile().osDisk().diffDiskSettings().withPlacement(placement); } return this; } @Override public VirtualMachineImpl withExistingVirtualMachineScaleSet(VirtualMachineScaleSet scaleSet) { if (scaleSet != null) { this.innerModel().withVirtualMachineScaleSet(new SubResource().withId(scaleSet.id())); } return this; } @Override public VirtualMachineImpl withOSDisk(String diskId) { if (diskId == null) { return this; } if (!isManagedDiskEnabled() || this.innerModel().storageProfile().osDisk().managedDisk() == null) { return this; } OSDisk osDisk = new OSDisk() .withCreateOption(this.innerModel().storageProfile().osDisk().createOption()); osDisk.withManagedDisk(new ManagedDiskParameters().withId(diskId)); this.storageProfile().withOsDisk(osDisk); this.storageProfile().osDisk().managedDisk().withId(diskId); return this; } @Override public VirtualMachineImpl withOSDisk(Disk disk) { if (disk == null) { return this; } return withOSDisk(disk.id()); } @Override public VirtualMachineImpl withTrustedLaunch() { 
ensureSecurityProfile().withSecurityType(SecurityTypes.TRUSTED_LAUNCH); return this; } @Override public VirtualMachineImpl withSecureBoot() { if (securityType() == null) { return this; } ensureUefiSettings().withSecureBootEnabled(true); return this; } @Override public VirtualMachineImpl withoutSecureBoot() { if (securityType() == null) { return this; } ensureUefiSettings().withSecureBootEnabled(false); return this; } @Override public VirtualMachineImpl withVTpm() { if (securityType() == null) { return this; } ensureUefiSettings().withVTpmEnabled(true); return this; } @Override public VirtualMachineImpl withoutVTpm() { if (securityType() == null) { return this; } ensureUefiSettings().withVTpmEnabled(false); return this; } private SecurityProfile ensureSecurityProfile() { SecurityProfile securityProfile = this.innerModel().securityProfile(); if (securityProfile == null) { securityProfile = new SecurityProfile(); this.innerModel().withSecurityProfile(securityProfile); } return securityProfile; } private UefiSettings ensureUefiSettings() { UefiSettings uefiSettings = ensureSecurityProfile().uefiSettings(); if (uefiSettings == null) { uefiSettings = new UefiSettings(); ensureSecurityProfile().withUefiSettings(uefiSettings); } return uefiSettings; } /** Class to manage Data disk collection. 
*/ private class ManagedDataDiskCollection { private final Map<String, DataDisk> newDisksToAttach = new HashMap<>(); private final List<DataDisk> existingDisksToAttach = new ArrayList<>(); private final List<DataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<DataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineImpl vm; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; private DiskDeleteOptionTypes defaultDeleteOptions; private DiskEncryptionSetParameters defaultDiskEncryptionSet; ManagedDataDiskCollection(VirtualMachineImpl vm) { this.vm = vm; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultDeleteOptions(DiskDeleteOptionTypes deleteOptions) { this.defaultDeleteOptions = deleteOptions; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDefaultEncryptionSet(String diskEncryptionSetId) { this.defaultDiskEncryptionSet = new DiskEncryptionSetParameters().withId(diskEncryptionSetId); } void setDataDisksDefaults() { VirtualMachineInner vmInner = this.vm.innerModel(); if (isPending()) { if (vmInner.storageProfile().dataDisks() == null) { vmInner.storageProfile().withDataDisks(new ArrayList<>()); } List<DataDisk> dataDisks = vmInner.storageProfile().dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksToAttach.values()) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.existingDisksToAttach) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { 
usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setAttachableNewDataDisks(nextLun); setAttachableExistingDataDisks(nextLun); setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (vmInner.storageProfile().dataDisks() != null && vmInner.storageProfile().dataDisks().size() == 0) { if (vm.isInCreateMode()) { vmInner.storageProfile().withDataDisks(null); } } this.clear(); } private void clear() { newDisksToAttach.clear(); existingDisksToAttach.clear(); implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); defaultCachingType = null; defaultStorageAccountType = null; defaultDeleteOptions = null; defaultDiskEncryptionSet = null; } private boolean isPending() { return newDisksToAttach.size() > 0 || existingDisksToAttach.size() > 0 || implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void setDefaultDiskEncryptionSetOptions(DataDisk dataDisk) { if (getDefaultDiskEncryptionSetOptions() != null) { if (dataDisk.managedDisk() != null && dataDisk.managedDisk().diskEncryptionSet() != null) { if (dataDisk.managedDisk().diskEncryptionSet().id() == null) { dataDisk.managedDisk().withDiskEncryptionSet(null); } } else { if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } dataDisk.managedDisk().withDiskEncryptionSet(getDefaultDiskEncryptionSetOptions()); } } } private void setAttachableNewDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Map.Entry<String, DataDisk> entry : this.newDisksToAttach.entrySet()) { Disk managedDisk = 
vm.taskResult(entry.getKey()); DataDisk dataDisk = entry.getValue(); dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } dataDisk.withManagedDisk(new ManagedDiskParameters()); dataDisk.managedDisk().withId(managedDisk.id()); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setAttachableExistingDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.existingDisksToAttach) { dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); 
dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } private DiskDeleteOptionTypes getDefaultDeleteOptions() { return defaultDeleteOptions; } private DiskEncryptionSetParameters getDefaultDiskEncryptionSetOptions() { return defaultDiskEncryptionSet; } } /** Class to manage VM boot diagnostics settings. 
*/ private class BootDiagnosticsHandler { private final VirtualMachineImpl vmImpl; private String creatableDiagnosticsStorageAccountKey; private boolean useManagedStorageAccount = false; BootDiagnosticsHandler(VirtualMachineImpl vmImpl) { this.vmImpl = vmImpl; if (isBootDiagnosticsEnabled() && this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } } public boolean isBootDiagnosticsEnabled() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null && this.vmInner().diagnosticsProfile().bootDiagnostics().enabled() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().enabled(); } return false; } public String bootDiagnosticsStorageUri() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri(); } return null; } BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; } BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(creatable); return this; } BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { this.enableDisable(true); this.useManagedStorageAccount = false; this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(storageAccountBlobEndpointUri); return this; } BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); } BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; } void prepare() { if 
(useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } if (this.creatableDiagnosticsStorageAccountKey != null || this.vmImpl.creatableStorageAccountKey != null || this.vmImpl.existingStorageAccountToAssociate != null) { return; } String accountName = this.vmImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmImpl.creatableGroup != null) { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withNewResourceGroup(this.vmImpl.creatableGroup); } else { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withExistingResourceGroup(this.vmImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(storageAccountCreatable); } void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.vmImpl.creatableStorageAccountKey != null) { storageAccount = 
this.vmImpl.<StorageAccount>taskResult(this.vmImpl.creatableStorageAccountKey); } else if (this.vmImpl.existingStorageAccountToAssociate != null) { storageAccount = this.vmImpl.existingStorageAccountToAssociate; } if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmInner() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); } private VirtualMachineInner vmInner() { return this.vmImpl.innerModel(); } private void enableDisable(boolean enable) { if (this.vmInner().diagnosticsProfile() == null) { this.vmInner().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmInner().diagnosticsProfile().bootDiagnostics() == null) { this.vmInner().diagnosticsProfile().withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
// Fluent implementation of VirtualMachine, covering both the definition (create)
// and update flows for managed- and unmanaged-disk based virtual machines.
class VirtualMachineImpl
    extends GroupableResourceImpl<VirtualMachine, VirtualMachineInner, VirtualMachineImpl, ComputeManager>
    implements VirtualMachine,
        VirtualMachine.DefinitionManagedOrUnmanaged,
        VirtualMachine.DefinitionManaged,
        VirtualMachine.DefinitionUnmanaged,
        VirtualMachine.Update,
        VirtualMachine.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate,
        VirtualMachine.UpdateStages.WithSystemAssignedIdentityBasedAccessOrUpdate {

    private final ClientLogger logger = new ClientLogger(VirtualMachineImpl.class);
    // Sibling service managers used to create/look up storage, network and RBAC resources.
    private final StorageManager storageManager;
    private final NetworkManager networkManager;
    private final AuthorizationManager authorizationManager;
    private final String vmName;
    // Generates unique names for implicitly created child resources (NICs, PIPs, storage accounts).
    private final IdentifierProvider namer;
    // Dependency keys for resources scheduled to be created alongside this VM.
    private String creatableStorageAccountKey;
    private String creatableAvailabilitySetKey;
    private String creatablePrimaryNetworkInterfaceKey;
    private List<String> creatableSecondaryNetworkInterfaceKeys;
    // Existing resources the caller asked to associate with this VM.
    private StorageAccount existingStorageAccountToAssociate;
    private AvailabilitySet existingAvailabilitySetToAssociate;
    private NetworkInterface existingPrimaryNetworkInterfaceToAssociate;
    private List<NetworkInterface> existingSecondaryNetworkInterfacesToAssociate;
    // Instance view cached by refreshInstanceViewAsync (null until fetched / after a miss).
    private VirtualMachineInstanceView virtualMachineInstanceView;
    private boolean isMarketplaceLinuxImage;
    // Partially-built primary NIC definition, advanced stage-by-stage as fluent calls arrive.
    private NetworkInterface.DefinitionStages.WithPrimaryPrivateIP nicDefinitionWithPrivateIp;
    private NetworkInterface.DefinitionStages.WithPrimaryNetworkSubnet nicDefinitionWithSubnet;
    private NetworkInterface.DefinitionStages.WithCreate nicDefinitionWithCreate;
    private VirtualMachineExtensionsImpl virtualMachineExtensions;
    private boolean isUnmanagedDiskSelected;
    private List<VirtualMachineUnmanagedDataDisk> unmanagedDataDisks;
    private final ManagedDataDiskCollection managedDataDisks;
    private final BootDiagnosticsHandler bootDiagnosticsHandler;
    private VirtualMachineMsiHandler virtualMachineMsiHandler;
    // PIP definition created implicitly by withNewPrimaryPublicIPAddress(leafDnsLabel).
    private PublicIpAddress.DefinitionStages.WithCreate implicitPipCreatable;
    private String newProximityPlacementGroupName;
    private ProximityPlacementGroupType newProximityPlacementGroupType;
    private boolean removeOsProfile;
    private DeleteOptions primaryNetworkInterfaceDeleteOptions;
    private final Map<String, DeleteOptions> secondaryNetworkInterfaceDeleteOptions = new HashMap<>();
    // Deep-copied update payload captured when update() begins; presumably compared
    // against the final payload to detect changes - TODO confirm against applyUpdate.
    private VirtualMachineUpdateInner updateParameterSnapshotOnUpdate;
    private static final SerializerAdapter SERIALIZER_ADAPTER =
        SerializerFactory.createDefaultManagementSerializerAdapter();

    VirtualMachineImpl(
        String name,
        VirtualMachineInner innerModel,
        final ComputeManager computeManager,
        final StorageManager storageManager,
        final NetworkManager networkManager,
        final AuthorizationManager authorizationManager) {
        super(name, innerModel, computeManager);
        this.storageManager = storageManager;
        this.networkManager = networkManager;
        this.authorizationManager = authorizationManager;
        this.vmName = name;
        this.isMarketplaceLinuxImage = false;
        this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.vmName);
        this.creatableSecondaryNetworkInterfaceKeys = new ArrayList<>();
        this.existingSecondaryNetworkInterfacesToAssociate = new ArrayList<>();
        this.virtualMachineExtensions =
            new VirtualMachineExtensionsImpl(computeManager.serviceClient().getVirtualMachineExtensions(), this);
        this.managedDataDisks = new ManagedDataDiskCollection(this);
        initializeDataDisks();
        this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this);
        this.virtualMachineMsiHandler = new VirtualMachineMsiHandler(authorizationManager, this);
        this.newProximityPlacementGroupName = null;
        this.newProximityPlacementGroupType = null;
    }

    // Starts an update flow; snapshots the current update payload first (see field note).
    @Override
    public VirtualMachineImpl update() {
        updateParameterSnapshotOnUpdate = this.deepCopyInnerToUpdateParameter();
        return super.update();
    }

    // Refreshes the VM from the service, resetting local fluent state and extensions.
    @Override
    public Mono<VirtualMachine> refreshAsync() {
        return super
            .refreshAsync()
            .map(
                virtualMachine -> {
                    reset(virtualMachine.innerModel());
                    virtualMachineExtensions.refresh();
                    return virtualMachine;
                });
    }

    @Override
    protected Mono<VirtualMachineInner> getInnerAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .getByResourceGroupAsync(this.resourceGroupName(), this.name());
    }

    // --- Power lifecycle operations; the sync variants block on their async twins. ---

    @Override
    public void deallocate() {
        this.deallocateAsync().block();
    }

    // Deallocates, then refreshes the local model to pick up the new power state.
    @Override
    public Mono<Void> deallocateAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .deallocateAsync(this.resourceGroupName(), this.name())
            .map(aVoid -> this.refreshAsync())
            .then();
    }

    @Override
    public void deallocate(boolean hibernate) {
        this.deallocateAsync(hibernate).block();
    }

    @Override
    public Mono<Void> deallocateAsync(boolean hibernate) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .deallocateAsync(this.resourceGroupName(), this.name(), hibernate)
            .map(aVoid -> this.refreshAsync())
            .then();
    }

    @Override
    public void generalize() {
        this.generalizeAsync().block();
    }

    @Override
    public Mono<Void> generalizeAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .generalizeAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void powerOff() {
        this.powerOffAsync().block();
    }

    // null skipShutdown lets the service apply its default shutdown behavior.
    @Override
    public Mono<Void> powerOffAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .powerOffAsync(this.resourceGroupName(), this.name(), null);
    }

    @Override
    public void powerOff(boolean skipShutdown) {
        this.powerOffAsync(skipShutdown).block();
    }

    @Override
    public Mono<Void> powerOffAsync(boolean skipShutdown) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .powerOffAsync(this.resourceGroupName(), this.name(), skipShutdown);
    }

    @Override
    public void restart() {
        this.restartAsync().block();
    }

    @Override
    public Mono<Void> restartAsync() {
        return this.manager().serviceClient().getVirtualMachines().restartAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void start() {
        this.startAsync().block();
    }

    @Override
    public Mono<Void> startAsync() {
        return
this.manager().serviceClient().getVirtualMachines().startAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void redeploy() {
        this.redeployAsync().block();
    }

    @Override
    public Mono<Void> redeployAsync() {
        return this.manager().serviceClient().getVirtualMachines().redeployAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void simulateEviction() {
        this.simulateEvictionAsync().block();
    }

    @Override
    public Mono<Void> simulateEvictionAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .simulateEvictionAsync(this.resourceGroupName(), this.name());
    }

    // Converts unmanaged (VHD) disks to managed disks, then refreshes the local model.
    @Override
    public void convertToManaged() {
        this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .convertToManagedDisks(this.resourceGroupName(), this.name());
        this.refresh();
    }

    @Override
    public Mono<Void> convertToManagedAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .convertToManagedDisksAsync(this.resourceGroupName(), this.name())
            .flatMap(aVoid -> refreshAsync())
            .then();
    }

    @Override
    public VirtualMachineEncryption diskEncryption() {
        return new VirtualMachineEncryptionImpl(this);
    }

    @Override
    public PagedIterable<VirtualMachineSize> availableSizes() {
        return PagedConverter.mapPage(this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .listAvailableSizes(this.resourceGroupName(), this.name()), VirtualMachineSizeImpl::new);
    }

    @Override
    public String capture(String containerName, String vhdPrefix, boolean overwriteVhd) {
        return this.captureAsync(containerName, vhdPrefix, overwriteVhd).block();
    }

    // Captures the VM image into the given container and returns the capture result
    // serialized as a JSON string.
    @Override
    public Mono<String> captureAsync(String containerName, String vhdPrefix, boolean overwriteVhd) {
        VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
        parameters.withDestinationContainerName(containerName);
        parameters.withOverwriteVhds(overwriteVhd);
        parameters.withVhdPrefix(vhdPrefix);
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .captureAsync(this.resourceGroupName(), this.name(), parameters)
            .map(
                captureResultInner -> {
                    try {
                        return SerializerUtils.getObjectMapper().writeValueAsString(captureResultInner);
                    } catch (JsonProcessingException ex) {
                        throw logger.logExceptionAsError(Exceptions.propagate(ex));
                    }
                });
    }

    @Override
    public VirtualMachineInstanceView refreshInstanceView() {
        return refreshInstanceViewAsync().block();
    }

    // Fetches the VM with its instance view expanded and caches the result; clears the
    // cache (and completes empty) when the service returns nothing.
    @Override
    public Mono<VirtualMachineInstanceView> refreshInstanceViewAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .getByResourceGroupWithResponseAsync(
                this.resourceGroupName(), this.name(), InstanceViewTypes.INSTANCE_VIEW)
            .map(
                inner -> {
                    virtualMachineInstanceView =
                        new VirtualMachineInstanceViewImpl(inner.getValue().instanceView());
                    return virtualMachineInstanceView;
                })
            .switchIfEmpty(
                Mono
                    .defer(
                        () -> {
                            virtualMachineInstanceView = null;
                            return Mono.empty();
                        }));
    }

    // --- Run-command helpers; all delegate to the manager's VirtualMachines collection. ---

    @Override
    public RunCommandResult runPowerShellScript(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runPowerShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public Mono<RunCommandResult> runPowerShellScriptAsync(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runPowerShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public RunCommandResult runShellScript(List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public Mono<RunCommandResult> runShellScriptAsync(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public RunCommandResult runCommand(RunCommandInput inputCommand) {
        return this.manager().virtualMachines().runCommand(this.resourceGroupName(), this.name(), inputCommand);
    }

    @Override
    public Mono<RunCommandResult> runCommandAsync(RunCommandInput inputCommand) {
        return this.manager().virtualMachines().runCommandAsync(this.resourceGroupName(), this.name(), inputCommand);
    }

    // --- Primary network fluent stages: each call advances the staged NIC definition. ---

    @Override
    public VirtualMachineImpl withNewPrimaryNetwork(Creatable<Network> creatable) {
        this.nicDefinitionWithPrivateIp =
            this.preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)).withNewPrimaryNetwork(creatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withNewPrimaryNetwork(String addressSpace) {
        this.nicDefinitionWithPrivateIp =
            this
                .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20))
                .withNewPrimaryNetwork(addressSpace);
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryNetwork(Network network) {
        this.nicDefinitionWithSubnet =
            this
                .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20))
                .withExistingPrimaryNetwork(network);
        return this;
    }

    @Override
    public VirtualMachineImpl withSubnet(String name) {
        this.nicDefinitionWithPrivateIp = this.nicDefinitionWithSubnet.withSubnet(name);
        return this;
    }

    @Override
    public VirtualMachineImpl withPrimaryPrivateIPAddressDynamic() {
        this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressDynamic();
        return this;
    }

    @Override
    public VirtualMachineImpl withPrimaryPrivateIPAddressStatic(String staticPrivateIPAddress) {
        this.nicDefinitionWithCreate =
            this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressStatic(staticPrivateIPAddress);
        return this;
    }

    @Override
    public VirtualMachineImpl withNewPrimaryPublicIPAddress(Creatable<PublicIpAddress> creatable) {
        Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(creatable);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel) {
        return withNewPrimaryPublicIPAddress(leafDnsLabel, null);
    }

    // Implicitly defines a new public IP (random "pip"-prefixed name) with the given
    // DNS leaf label and wires it into the staged primary NIC definition.
    public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel, DeleteOptions deleteOptions) {
        PublicIpAddress.DefinitionStages.WithGroup definitionWithGroup =
            this
                .networkManager
                .publicIpAddresses()
                .define(this.namer.getRandomName("pip", 15))
                .withRegion(this.regionName());
        PublicIpAddress.DefinitionStages.WithCreate definitionAfterGroup;
        if (this.creatableGroup != null) {
            definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
        } else {
            definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
        }
        this.implicitPipCreatable = definitionAfterGroup.withLeafDomainLabel(leafDnsLabel);
        Creatable<NetworkInterface> nicCreatable =
            this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(this.implicitPipCreatable);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryPublicIPAddress(PublicIpAddress publicIPAddress) {
        Creatable<NetworkInterface> nicCreatable =
            this.nicDefinitionWithCreate.withExistingPrimaryPublicIPAddress(publicIPAddress);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    // Finalizes the staged NIC definition without attaching any public IP.
    @Override
    public VirtualMachineImpl withoutPrimaryPublicIPAddress() {
        Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate;
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withNewPrimaryNetworkInterface(Creatable<NetworkInterface> creatable) {
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(creatable);
        return this;
    }

    public VirtualMachineImpl withNewPrimaryNetworkInterface(String name, String publicDnsNameLabel) {
        Creatable<NetworkInterface> definitionCreatable =
            prepareNetworkInterface(name).withNewPrimaryPublicIPAddress(publicDnsNameLabel);
        return
withNewPrimaryNetworkInterface(definitionCreatable);
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryNetworkInterface(NetworkInterface networkInterface) {
        this.existingPrimaryNetworkInterfaceToAssociate = networkInterface;
        return this;
    }

    // --- OS image selection. Stored-image variants point the OS disk at a user VHD;
    // marketplace/custom variants set an ImageReference. Windows variants also turn on
    // the VM agent and automatic updates by default. ---

    @Override
    public VirtualMachineImpl withStoredWindowsImage(String imageUrl) {
        VirtualHardDisk userImageVhd = new VirtualHardDisk();
        userImageVhd.withUri(imageUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().osDisk().withImage(userImageVhd);
        this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    @Override
    public VirtualMachineImpl withStoredLinuxImage(String imageUrl) {
        VirtualHardDisk userImageVhd = new VirtualHardDisk();
        userImageVhd.withUri(imageUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().osDisk().withImage(userImageVhd);
        this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        return this;
    }

    @Override
    public VirtualMachineImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) {
        return withSpecificWindowsImageVersion(knownImage.imageReference());
    }

    @Override
    public VirtualMachineImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) {
        return withSpecificLinuxImageVersion(knownImage.imageReference());
    }

    @Override
    public VirtualMachineImpl withSpecificWindowsImageVersion(ImageReference imageReference) {
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReference);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    @Override
    public VirtualMachineImpl withSpecificLinuxImageVersion(ImageReference imageReference) {
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReference);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        // Marketplace Linux images may require purchase-plan handling at create time.
        this.isMarketplaceLinuxImage = true;
        return this;
    }

    @Override
    public VirtualMachineImpl withLatestWindowsImage(String publisher, String offer, String sku) {
        ImageReference imageReference = new ImageReference();
        imageReference.withPublisher(publisher);
        imageReference.withOffer(offer);
        imageReference.withSku(sku);
        imageReference.withVersion("latest");
        return withSpecificWindowsImageVersion(imageReference);
    }

    @Override
    public VirtualMachineImpl withLatestLinuxImage(String publisher, String offer, String sku) {
        ImageReference imageReference = new ImageReference();
        imageReference.withPublisher(publisher);
        imageReference.withOffer(offer);
        imageReference.withSku(sku);
        imageReference.withVersion("latest");
        return withSpecificLinuxImageVersion(imageReference);
    }

    @Override
    public VirtualMachineImpl withGeneralizedWindowsCustomImage(String customImageId) {
        ImageReference imageReferenceInner = new ImageReference();
        imageReferenceInner.withId(customImageId);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReferenceInner);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    // Specialized images carry their own OS state, so the OS profile is dropped later
    // (removeOsProfile flag) rather than sent to the service.
    @Override
    public VirtualMachineImpl withSpecializedWindowsCustomImage(String customImageId) {
        this.withGeneralizedWindowsCustomImage(customImageId);
        this.removeOsProfile = true;
        return this;
    }

    @Override
    public VirtualMachineImpl withGeneralizedWindowsGalleryImageVersion(String galleryImageVersionId) {
        return this.withGeneralizedWindowsCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withSpecializedWindowsGalleryImageVersion(String galleryImageVersionId) {
        return this.withSpecializedWindowsCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withGeneralizedLinuxCustomImage(String customImageId) {
        ImageReference imageReferenceInner = new ImageReference();
        imageReferenceInner.withId(customImageId);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReferenceInner);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        this.isMarketplaceLinuxImage = true;
        return this;
    }

    @Override
    public VirtualMachineImpl withSpecializedLinuxCustomImage(String customImageId) {
        this.withGeneralizedLinuxCustomImage(customImageId);
        this.removeOsProfile = true;
        return this;
    }

    @Override
    public VirtualMachineImpl withGeneralizedLinuxGalleryImageVersion(String galleryImageVersionId) {
        return this.withGeneralizedLinuxCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withSpecializedLinuxGalleryImageVersion(String galleryImageVersionId) {
        return this.withSpecializedLinuxCustomImage(galleryImageVersionId);
    }

    // Attaches an existing specialized OS VHD (unmanaged); clears any managed-disk settings.
    @Override
    public VirtualMachineImpl withSpecializedOSUnmanagedDisk(String osDiskUrl, OperatingSystemTypes osType) {
        VirtualHardDisk osVhd = new VirtualHardDisk();
        osVhd.withUri(osDiskUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH);
        this.innerModel().storageProfile().osDisk().withVhd(osVhd);
        this.innerModel().storageProfile().osDisk().withOsType(osType);
        this.innerModel().storageProfile().osDisk().withManagedDisk(null);
        return this;
    }

    // Attaches an existing specialized managed OS disk; clears any VHD settings.
    @Override
    public VirtualMachineImpl withSpecializedOSDisk(Disk disk, OperatingSystemTypes osType) {
        ManagedDiskParameters diskParametersInner = new ManagedDiskParameters();
        diskParametersInner.withId(disk.id());
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH);
        this.innerModel().storageProfile().osDisk().withManagedDisk(diskParametersInner);
        this.innerModel().storageProfile().osDisk().withOsType(osType);
        this.innerModel().storageProfile().osDisk().withVhd(null);
        return this;
    }

    // --- OS profile credential / configuration setters. ---

    @Override
    public VirtualMachineImpl withRootUsername(String rootUserName) {
        this.innerModel().osProfile().withAdminUsername(rootUserName);
        return this;
    }

    @Override
    public VirtualMachineImpl withAdminUsername(String adminUserName) {
        this.innerModel().osProfile().withAdminUsername(adminUserName);
        return this;
    }

    // Adds an SSH public key under the admin user's authorized_keys, creating the
    // SSH configuration list on first use.
    @Override
    public VirtualMachineImpl withSsh(String publicKeyData) {
        OSProfile osProfile = this.innerModel().osProfile();
        if (osProfile.linuxConfiguration().ssh() == null) {
            SshConfiguration sshConfiguration = new SshConfiguration();
            sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>());
            osProfile.linuxConfiguration().withSsh(sshConfiguration);
        }
        SshPublicKey sshPublicKey = new SshPublicKey();
        sshPublicKey.withKeyData(publicKeyData);
        sshPublicKey.withPath("/home/" + osProfile.adminUsername() + "/.ssh/authorized_keys");
        osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey);
        return this;
    }

    @Override
    public VirtualMachineImpl withoutVMAgent() {
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(false);
        return this;
    }

    @Override
    public VirtualMachineImpl withoutAutoUpdate() {
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false);
        return this;
    }

    @Override
    public
VirtualMachineImpl withTimeZone(String timeZone) {
        this.innerModel().osProfile().windowsConfiguration().withTimeZone(timeZone);
        return this;
    }

    // Adds a WinRM listener, creating the WinRM configuration on first use.
    @Override
    public VirtualMachineImpl withWinRM(WinRMListener listener) {
        if (this.innerModel().osProfile().windowsConfiguration().winRM() == null) {
            WinRMConfiguration winRMConfiguration = new WinRMConfiguration();
            this.innerModel().osProfile().windowsConfiguration().withWinRM(winRMConfiguration);
        }
        this.innerModel().osProfile().windowsConfiguration().winRM().listeners().add(listener);
        return this;
    }

    @Override
    public VirtualMachineImpl withRootPassword(String password) {
        this.innerModel().osProfile().withAdminPassword(password);
        return this;
    }

    @Override
    public VirtualMachineImpl withAdminPassword(String password) {
        this.innerModel().osProfile().withAdminPassword(password);
        return this;
    }

    @Override
    public VirtualMachineImpl withCustomData(String base64EncodedCustomData) {
        this.innerModel().osProfile().withCustomData(base64EncodedCustomData);
        return this;
    }

    @Override
    public VirtualMachineImpl withUserData(String base64EncodedUserData) {
        this.innerModel().withUserData(base64EncodedUserData);
        return this;
    }

    @Override
    public VirtualMachineImpl withComputerName(String computerName) {
        this.innerModel().osProfile().withComputerName(computerName);
        return this;
    }

    @Override
    public VirtualMachineImpl withSize(String sizeName) {
        this.innerModel().hardwareProfile().withVmSize(VirtualMachineSizeTypes.fromString(sizeName));
        return this;
    }

    @Override
    public VirtualMachineImpl withSize(VirtualMachineSizeTypes size) {
        this.innerModel().hardwareProfile().withVmSize(size);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskCaching(CachingTypes cachingType) {
        this.innerModel().storageProfile().osDisk().withCaching(cachingType);
        return this;
    }

    // Chooses the VHD location for an unmanaged OS disk created from an image.
    // No-op for managed disks, attached (non-image) OS disks, and custom images.
    // For a stored image the destination VHD is placed on the same storage account
    // as the source image (only container/blob names change).
    @Override
    public VirtualMachineImpl withOSDiskVhdLocation(String containerName, String vhdName) {
        if (isManagedDiskEnabled()) {
            return this;
        }
        StorageProfile storageProfile = this.innerModel().storageProfile();
        OSDisk osDisk = storageProfile.osDisk();
        if (!this.isOSDiskFromImage(osDisk)) {
            return this;
        }
        if (this.isOsDiskFromCustomImage(storageProfile)) {
            return this;
        }
        if (this.isOSDiskFromPlatformImage(storageProfile)) {
            VirtualHardDisk osVhd = new VirtualHardDisk();
            osVhd.withUri(temporaryBlobUrl(containerName, vhdName));
            this.innerModel().storageProfile().osDisk().withVhd(osVhd);
            return this;
        }
        if (this.isOSDiskFromStoredImage(storageProfile)) {
            VirtualHardDisk osVhd = new VirtualHardDisk();
            try {
                URL sourceCustomImageUrl = new URL(osDisk.image().uri());
                URL destinationVhdUrl =
                    new URL(
                        sourceCustomImageUrl.getProtocol(),
                        sourceCustomImageUrl.getHost(),
                        "/" + containerName + "/" + vhdName);
                osVhd.withUri(destinationVhdUrl.toString());
            } catch (MalformedURLException ex) {
                throw logger.logExceptionAsError(new RuntimeException(ex));
            }
            this.innerModel().storageProfile().osDisk().withVhd(osVhd);
        }
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) {
        if (this.innerModel().storageProfile().osDisk().managedDisk() == null) {
            this.innerModel().storageProfile().osDisk().withManagedDisk(new ManagedDiskParameters());
        }
        this.innerModel().storageProfile().osDisk().managedDisk().withStorageAccountType(accountType);
        return this;
    }

    // --- Defaults applied to managed data disks attached later. ---

    @Override
    public VirtualMachineImpl withDataDiskDefaultCachingType(CachingTypes cachingType) {
        this.managedDataDisks.setDefaultCachingType(cachingType);
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) {
        this.managedDataDisks.setDefaultStorageAccountType(storageAccountType);
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultDeleteOptions(DeleteOptions deleteOptions) {
        this.managedDataDisks.setDefaultDeleteOptions(diskDeleteOptionsFromDeleteOptions(deleteOptions));
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultDiskEncryptionSet(String diskEncryptionSetId) {
        this.managedDataDisks.setDefaultEncryptionSet(diskEncryptionSetId);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskEncryptionSettings(DiskEncryptionSettings settings) {
        this.innerModel().storageProfile().osDisk().withEncryptionSettings(settings);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskSizeInGB(int size) {
        this.innerModel().storageProfile().osDisk().withDiskSizeGB(size);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskName(String name) {
        this.innerModel().storageProfile().osDisk().withName(name);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskDeleteOptions(DeleteOptions deleteOptions) {
        this.innerModel().storageProfile().osDisk()
            .withDeleteOption(DiskDeleteOptionTypes.fromString(deleteOptions.toString()));
        return this;
    }

    // Sets the disk-encryption-set id on the OS disk, creating the intermediate
    // managed-disk / encryption-set parameter objects on demand.
    @Override
    public VirtualMachineImpl withOSDiskDiskEncryptionSet(String diskEncryptionSetId) {
        if (this.innerModel().storageProfile().osDisk().managedDisk() == null) {
            this.innerModel().storageProfile().osDisk()
                .withManagedDisk(new ManagedDiskParameters());
        }
        if (this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) {
            this.innerModel().storageProfile().osDisk().managedDisk()
                .withDiskEncryptionSet(new DiskEncryptionSetParameters());
        }
        this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet().withId(diskEncryptionSetId);
        return this;
    }

    // Ephemeral OS disks live on local storage and require read-only caching.
    @Override
    public VirtualMachineImpl withEphemeralOSDisk() {
        if (this.innerModel().storageProfile().osDisk().diffDiskSettings() == null) {
            this.innerModel().storageProfile().osDisk().withDiffDiskSettings(new DiffDiskSettings());
        }
        this.innerModel().storageProfile().osDisk().diffDiskSettings().withOption(DiffDiskOptions.LOCAL);
        withOSDiskCaching(CachingTypes.READ_ONLY);
        return this;
    }

    // --- Unmanaged (VHD) data disks; these throw if managed disks are in use. ---

    @Override
    public UnmanagedDataDiskImpl defineUnmanagedDataDisk(String name) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return UnmanagedDataDiskImpl.prepareDataDisk(name, this);
    }

    @Override
    public VirtualMachineImpl withNewUnmanagedDataDisk(Integer sizeInGB) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return defineUnmanagedDataDisk(null).withNewVhd(sizeInGB).attach();
    }

    @Override
    public VirtualMachineImpl withExistingUnmanagedDataDisk(
        String storageAccountName, String containerName, String vhdName) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return defineUnmanagedDataDisk(null).withExistingVhd(storageAccountName, containerName, vhdName).attach();
    }

    // NOTE(review): removes by positional index from both unmanagedDataDisks and the
    // inner dataDisks list - assumes the two lists stay parallel; verify.
    @Override
    public VirtualMachineImpl withoutUnmanagedDataDisk(String name) {
        int idx = -1;
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            idx++;
            if (dataDisk.name().equalsIgnoreCase(name)) {
                this.unmanagedDataDisks.remove(idx);
                this.innerModel().storageProfile().dataDisks().remove(idx);
                break;
            }
        }
        return this;
    }

    @Override
    public VirtualMachineImpl withoutUnmanagedDataDisk(int lun) {
        int idx = -1;
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            idx++;
            if (dataDisk.lun() == lun) {
                this.unmanagedDataDisks.remove(idx);
                this.innerModel().storageProfile().dataDisks().remove(idx);
                break;
            }
        }
        return this;
    }

    @Override
    public UnmanagedDataDiskImpl updateUnmanagedDataDisk(String name) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_NO_UNMANAGED_DISK_TO_UPDATE);
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            if (dataDisk.name().equalsIgnoreCase(name)) {
                return (UnmanagedDataDiskImpl) dataDisk;
            }
        }
        throw logger.logExceptionAsError(new RuntimeException("A data disk with name '" + name + "' not found"));
    }

    // --- Managed data disks; these throw if unmanaged disks are in use.
    // Lun -1 presumably means "auto-assign later" - TODO confirm in ManagedDataDiskCollection. ---

    @Override
    public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
this.managedDataDisks.newDisksToAttach.put(this.addDependency(creatable), new DataDisk().withLun(-1));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this
            .managedDataDisks
            .newDisksToAttach
            .put(this.addDependency(creatable), new DataDisk().withLun(lun).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this.managedDataDisks.implicitDisksToAssociate.add(new DataDisk().withLun(-1).withDiskSizeGB(sizeInGB));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(new DataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(
        int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(storageAccountType);
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(sizeInGB)
                    .withCaching(cachingType)
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    // Options-bearing overload: managed-disk parameters are only materialized when a
    // storage-account type or disk-encryption set is actually configured.
    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, VirtualMachineDiskOptions options) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = null;
        if (options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters = new ManagedDiskParameters();
            managedDiskParameters.withStorageAccountType(options.storageAccountType());
            if (options.isDiskEncryptionSetConfigured()) {
                managedDiskParameters.withDiskEncryptionSet(
                    new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
            }
        }
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(sizeInGB)
                    .withCaching(options.cachingTypes())
                    .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(new DataDisk().withLun(-1).withManagedDisk(managedDiskParameters));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(new DataDisk().withLun(lun).withManagedDisk(managedDiskParameters).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk, int newSizeInGB, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(newSizeInGB)
                    .withManagedDisk(managedDiskParameters)
                    .withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(
        Disk disk, int newSizeInGB, int lun, VirtualMachineDiskOptions options) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        if (options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters.withDiskEncryptionSet(
                new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
        }
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(newSizeInGB)
                    .withCaching(options.cachingTypes())
                    .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(int imageLun) {
        this.managedDataDisks.newDisksFromImage.add(new DataDisk().withLun(imageLun));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(int imageLun, int newSizeInGB, CachingTypes cachingType) {
        this
            .managedDataDisks
            .newDisksFromImage
            .add(new DataDisk().withLun(imageLun).withDiskSizeGB(newSizeInGB).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(
        int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) {
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(storageAccountType);
        this
            .managedDataDisks
            .newDisksFromImage
            .add(
                new DataDisk()
                    .withLun(imageLun)
                    .withDiskSizeGB(newSizeInGB)
                    .withManagedDisk(managedDiskParameters)
                    .withCaching(cachingType));
        return this;
    }

    // NOTE(review): unlike the other withNewDataDiskFromImage overloads, this one adds
    // to implicitDisksToAssociate (not newDisksFromImage) and also calls
    // throwIfManagedDiskDisabled - verify this divergence is intentional.
    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(
        int imageLun, int newSizeInGB, VirtualMachineDiskOptions options) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = null;
        if (options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters = new ManagedDiskParameters();
            managedDiskParameters.withStorageAccountType(options.storageAccountType());
            if (options.isDiskEncryptionSetConfigured()) {
                managedDiskParameters.withDiskEncryptionSet(
                    new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
            }
        }
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(
                new DataDisk()
                    .withLun(imageLun)
                    .withDiskSizeGB(newSizeInGB)
                    .withCaching(options.cachingTypes())
                    .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    @Override
    public VirtualMachineImpl withoutDataDisk(int lun) {
        if (!isManagedDiskEnabled()) {
            return this;
        }
        this.managedDataDisks.diskLunsToRemove.add(lun);
        return this;
    }

    // Only the first requested storage account is honored; later calls are ignored.
    @Override
    public VirtualMachineImpl withNewStorageAccount(Creatable<StorageAccount> creatable) {
        if (this.creatableStorageAccountKey == null) {
            this.creatableStorageAccountKey = this.addDependency(creatable);
        }
        return this;
    }

    @Override
    public VirtualMachineImpl withNewStorageAccount(String name) {
        StorageAccount.DefinitionStages.WithGroup definitionWithGroup =
            this.storageManager.storageAccounts().define(name).withRegion(this.regionName());
        Creatable<StorageAccount> definitionAfterGroup;
        if (this.creatableGroup != null) {
            definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
        } else {
            definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
        }
        return withNewStorageAccount(definitionAfterGroup);
    }

    @Override
    public VirtualMachineImpl withExistingStorageAccount(StorageAccount storageAccount) {
        this.existingStorageAccountToAssociate = storageAccount;
        return this;
    }

    @Override
    public VirtualMachineImpl
// (continuation) completes the declaration begun on the previous line:
// VirtualMachineImpl withNewAvailabilitySet(Creatable<AvailabilitySet>); first request wins.
withNewAvailabilitySet(Creatable<AvailabilitySet> creatable) {
    if (this.creatableAvailabilitySetKey == null) {
        this.creatableAvailabilitySetKey = this.addDependency(creatable);
    }
    return this;
}

/** Associates an existing proximity placement group by id; clears any pending new-group request. */
@Override
public VirtualMachineImpl withProximityPlacementGroup(String proximityPlacementGroupId) {
    this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId));
    newProximityPlacementGroupName = null;
    return this;
}

/** Requests creation of a new proximity placement group with the given name and type. */
@Override
public VirtualMachineImpl withNewProximityPlacementGroup(
    String proximityPlacementGroupName, ProximityPlacementGroupType type) {
    this.newProximityPlacementGroupName = proximityPlacementGroupName;
    this.newProximityPlacementGroupType = type;
    this.innerModel().withProximityPlacementGroup(null);
    return this;
}

/** Detaches the VM from any proximity placement group. */
@Override
public VirtualMachineImpl withoutProximityPlacementGroup() {
    this.innerModel().withProximityPlacementGroup(null);
    return this;
}

/** Creates and associates a new availability set; SKU (ALIGNED/CLASSIC) follows the disk model. */
@Override
public VirtualMachineImpl withNewAvailabilitySet(String name) {
    AvailabilitySet.DefinitionStages.WithGroup definitionWithGroup =
        super.myManager.availabilitySets().define(name).withRegion(this.regionName());
    AvailabilitySet.DefinitionStages.WithSku definitionWithSku;
    if (this.creatableGroup != null) {
        definitionWithSku = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
    } else {
        definitionWithSku = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
    }
    Creatable<AvailabilitySet> creatable;
    if (isManagedDiskEnabled()) {
        creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.ALIGNED);
    } else {
        creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.CLASSIC);
    }
    return withNewAvailabilitySet(creatable);
}

/** Associates an existing availability set. */
@Override
public VirtualMachineImpl withExistingAvailabilitySet(AvailabilitySet availabilitySet) {
    this.existingAvailabilitySetToAssociate = availabilitySet;
    return this;
}

/** Adds a secondary network interface to be created together with the VM. */
@Override
public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable) {
    this.creatableSecondaryNetworkInterfaceKeys.add(this.addDependency(creatable));
    return this;
}

/** Adds a secondary network interface with an explicit delete option. */
@Override
public VirtualMachineImpl withNewSecondaryNetworkInterface(
    Creatable<NetworkInterface> creatable, DeleteOptions deleteOptions) {
    String key = this.addDependency(creatable);
    this.creatableSecondaryNetworkInterfaceKeys.add(key);
    if (deleteOptions != null) {
        this.secondaryNetworkInterfaceDeleteOptions.put(key, deleteOptions);
    }
    return this;
}

/** Associates an existing network interface as a secondary NIC. */
@Override
public VirtualMachineImpl withExistingSecondaryNetworkInterface(NetworkInterface networkInterface) {
    this.existingSecondaryNetworkInterfacesToAssociate.add(networkInterface);
    return this;
}

/** Begins definition of a new VM extension with the given name. */
@Override
public VirtualMachineExtensionImpl defineNewExtension(String name) {
    return this.virtualMachineExtensions.define(name);
}

/** Removes the non-primary network interface with the given name, if present. */
@Override
public VirtualMachineImpl withoutSecondaryNetworkInterface(String name) {
    if (this.innerModel().networkProfile() != null
        && this.innerModel().networkProfile().networkInterfaces() != null) {
        int idx = -1;
        for (NetworkInterfaceReference nicReference : this.innerModel().networkProfile().networkInterfaces()) {
            idx++;
            if (!nicReference.primary()
                && name.equalsIgnoreCase(ResourceUtils.nameFromResourceId(nicReference.id()))) {
                // Removal during iteration is safe here because we break immediately afterwards.
                this.innerModel().networkProfile().networkInterfaces().remove(idx);
                break;
            }
        }
    }
    return this;
}

/** Begins update of an existing VM extension. */
@Override
public VirtualMachineExtensionImpl updateExtension(String name) {
    return this.virtualMachineExtensions.update(name);
}

/** Removes the VM extension with the given name. */
@Override
public VirtualMachineImpl withoutExtension(String name) {
    this.virtualMachineExtensions.remove(name);
    return this;
}

/** Sets the marketplace purchase plan for the VM. */
@Override
public VirtualMachineImpl withPlan(PurchasePlan plan) {
    this.innerModel().withPlan(new Plan());
    this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name());
    return this;
}

/** Sets the purchase plan together with a promotion code. */
@Override
public VirtualMachineImpl withPromotionalPlan(PurchasePlan plan, String promotionCode) {
    this.withPlan(plan);
    this.innerModel().plan().withPromotionCode(promotionCode);
    return this;
}

/** Opts the VM into unmanaged (VHD-based) disks. */
@Override
public VirtualMachineImpl withUnmanagedDisks() {
    this.isUnmanagedDiskSelected = true;
    return this;
}

/** Enables boot diagnostics on a managed storage account. */
@Override
public VirtualMachineImpl withBootDiagnosticsOnManagedStorageAccount() {
    this.bootDiagnosticsHandler.withBootDiagnostics(true);
    return this;
}

/** Enables boot diagnostics with an implicitly chosen storage account. */
@Override
public VirtualMachineImpl withBootDiagnostics() {
    this.bootDiagnosticsHandler.withBootDiagnostics(false);
    return this;
}

/** Enables boot diagnostics backed by the given creatable storage account. */
@Override
public VirtualMachineImpl withBootDiagnostics(Creatable<StorageAccount> creatable) {
    this.bootDiagnosticsHandler.withBootDiagnostics(creatable);
    return this;
}

/** Enables boot diagnostics backed by the storage account at the given blob endpoint URI. */
@Override
public VirtualMachineImpl withBootDiagnostics(String storageAccountBlobEndpointUri) {
    this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri);
    return this;
}

/** Enables boot diagnostics backed by the given existing storage account. */
@Override
public VirtualMachineImpl withBootDiagnostics(StorageAccount storageAccount) {
    this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount);
    return this;
}

/** Disables boot diagnostics. */
@Override
public VirtualMachineImpl withoutBootDiagnostics() {
    this.bootDiagnosticsHandler.withoutBootDiagnostics();
    return this;
}

/** Sets the VM priority. */
@Override
public VirtualMachineImpl withPriority(VirtualMachinePriorityTypes priority) {
    this.innerModel().withPriority(priority);
    return this;
}

/** Shorthand for LOW priority. */
@Override
public VirtualMachineImpl withLowPriority() {
    this.withPriority(VirtualMachinePriorityTypes.LOW);
    return this;
}

/** LOW priority with an explicit eviction policy. */
@Override
public VirtualMachineImpl withLowPriority(VirtualMachineEvictionPolicyTypes policy) {
    this.withLowPriority();
    this.innerModel().withEvictionPolicy(policy);
    return this;
}

/** Shorthand for SPOT priority. */
@Override
public VirtualMachineImpl withSpotPriority() {
    this.withPriority(VirtualMachinePriorityTypes.SPOT);
    return this;
}

/** SPOT priority with an explicit eviction policy. */
@Override
public VirtualMachineImpl withSpotPriority(VirtualMachineEvictionPolicyTypes policy) {
    this.withSpotPriority();
    this.innerModel().withEvictionPolicy(policy);
    return this;
}

/** Sets the maximum billing price for a low-priority/spot VM. */
@Override
public VirtualMachineImpl withMaxPrice(Double maxPrice) {
    this.innerModel().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice));
    return this;
}

// (continues on a later line) start of withSystemAssignedManagedServiceIdentity().
@Override
public
// (continuation) completes the declaration begun on the previous line: enables the system-assigned MSI.
VirtualMachineImpl withSystemAssignedManagedServiceIdentity() {
    this.virtualMachineMsiHandler.withLocalManagedServiceIdentity();
    return this;
}

/** Disables the system-assigned managed service identity. */
@Override
public VirtualMachineImpl withoutSystemAssignedManagedServiceIdentity() {
    this.virtualMachineMsiHandler.withoutLocalManagedServiceIdentity();
    return this;
}

/** Grants the system-assigned identity the given built-in role on the given resource. */
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) {
    this.virtualMachineMsiHandler.withAccessTo(resourceId, role);
    return this;
}

/** Grants the system-assigned identity the given built-in role on this VM's resource group. */
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole role) {
    this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(role);
    return this;
}

/** Grants the system-assigned identity the given role definition on the given resource. */
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, String roleDefinitionId) {
    this.virtualMachineMsiHandler.withAccessTo(resourceId, roleDefinitionId);
    return this;
}

/** Grants the system-assigned identity the given role definition on this VM's resource group. */
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(String roleDefinitionId) {
    this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId);
    return this;
}

/** Associates a user-assigned identity to be created together with this VM. */
@Override
public VirtualMachineImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) {
    this.virtualMachineMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
    return this;
}

/** Associates an existing user-assigned identity. */
@Override
public VirtualMachineImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) {
    this.virtualMachineMsiHandler.withExistingExternalManagedServiceIdentity(identity);
    return this;
}

/** Removes a user-assigned identity by id. */
@Override
public VirtualMachineImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
    this.virtualMachineMsiHandler.withoutExternalManagedServiceIdentity(identityId);
    return this;
}

/** Sets the license type on the inner model. */
@Override
public VirtualMachineImpl withLicenseType(String licenseType) {
    innerModel().withLicenseType(licenseType);
    return this;
}

/** Enables hibernation via the additional-capabilities block (created on demand). */
@Override
public VirtualMachineImpl enableHibernation() {
    if (this.innerModel().additionalCapabilities() == null) {
        this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities());
    }
    this.innerModel().additionalCapabilities().withHibernationEnabled(true);
    return this;
}

/** Disables hibernation via the additional-capabilities block (created on demand). */
@Override
public VirtualMachineImpl disableHibernation() {
    if (this.innerModel().additionalCapabilities() == null) {
        this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities());
    }
    this.innerModel().additionalCapabilities().withHibernationEnabled(false);
    return this;
}

/**
 * Decides whether this VM uses managed disks, from the OS disk source (custom image, attached
 * managed/unmanaged disk, stored/platform image) and, failing that, the create/update mode.
 */
@Override
public boolean isManagedDiskEnabled() {
    if (isOsDiskFromCustomImage(this.innerModel().storageProfile())) {
        return true;
    }
    if (isOSDiskAttachedManaged(this.innerModel().storageProfile().osDisk())) {
        return true;
    }
    if (isOSDiskFromStoredImage(this.innerModel().storageProfile())) {
        return false;
    }
    if (isOSDiskAttachedUnmanaged(this.innerModel().storageProfile().osDisk())) {
        return false;
    }
    if (isOSDiskFromPlatformImage(this.innerModel().storageProfile())) {
        if (this.isUnmanagedDiskSelected) {
            return false;
        }
    }
    if (isInCreateMode()) {
        // Managed disks are the default for new VMs.
        return true;
    } else {
        // Existing VM: absence of a VHD on the OS disk implies managed.
        return this.innerModel().storageProfile().osDisk().vhd() == null;
    }
}

/** OS computer name, or null when there is no OS profile. */
@Override
public String computerName() {
    if (innerModel().osProfile() == null) {
        return null;
    }
    return innerModel().osProfile().computerName();
}

/** The VM size. */
@Override
public VirtualMachineSizeTypes size() {
    return innerModel().hardwareProfile().vmSize();
}

/** OS type from the OS disk, falling back to the OS profile configuration; null when undeterminable. */
@Override
public OperatingSystemTypes osType() {
    if (innerModel().storageProfile().osDisk().osType() != null) {
        return innerModel().storageProfile().osDisk().osType();
    }
    if (innerModel().osProfile() != null) {
        if (innerModel().osProfile().linuxConfiguration() != null) {
            return OperatingSystemTypes.LINUX;
        }
        if (innerModel().osProfile().windowsConfiguration() != null) {
            return OperatingSystemTypes.WINDOWS;
        }
    }
    return null;
}

/** URI of the unmanaged OS VHD, or null for managed-disk VMs. */
@Override
public String osUnmanagedDiskVhdUri() {
    if (isManagedDiskEnabled() || this.storageProfile().osDisk().vhd() == null) {
        return null;
    }
    return innerModel().storageProfile().osDisk().vhd().uri();
}

/** OS disk caching type. */
@Override
public CachingTypes osDiskCachingType() {
    return innerModel().storageProfile().osDisk().caching();
}

/** OS disk size in GB (0 when unset). */
@Override
public int osDiskSize() {
    return ResourceManagerUtils.toPrimitiveInt(innerModel().storageProfile().osDisk().diskSizeGB());
}

/** Storage account type of the managed OS disk, or null. */
@Override
public StorageAccountTypes osDiskStorageAccountType() {
    if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) {
        return null;
    }
    return this.storageProfile().osDisk().managedDisk().storageAccountType();
}

/** Id of the managed OS disk, or null for unmanaged-disk VMs. */
@Override
public String osDiskId() {
    if (!isManagedDiskEnabled()) {
        return null;
    }
    // NOTE(review): assumes managedDisk() is non-null whenever managed disks are enabled — confirm.
    return this.storageProfile().osDisk().managedDisk().id();
}

/** OS disk delete option, or null. */
@Override
public DeleteOptions osDiskDeleteOptions() {
    if (!isManagedDiskEnabled() || this.storageProfile().osDisk().deleteOption() == null) {
        return null;
    }
    return DeleteOptions.fromString(this.storageProfile().osDisk().deleteOption().toString());
}

/** Disk encryption set id of the managed OS disk, or null. */
@Override
public String osDiskDiskEncryptionSetId() {
    if (!isManagedDiskEnabled()
        || this.storageProfile().osDisk().managedDisk() == null
        || this.storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) {
        return null;
    }
    return this.storageProfile().osDisk().managedDisk().diskEncryptionSet().id();
}

/** Whether the OS disk is ephemeral (diff-disk settings carry a placement). */
@Override
public boolean isOSDiskEphemeral() {
    return this.storageProfile().osDisk().diffDiskSettings() != null
        && this.storageProfile().osDisk().diffDiskSettings().placement() != null;
}

/** Unmanaged data disks keyed by LUN (empty for managed-disk VMs); unmodifiable. */
@Override
public Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks() {
    Map<Integer, VirtualMachineUnmanagedDataDisk> dataDisks = new HashMap<>();
    if (!isManagedDiskEnabled()) {
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            dataDisks.put(dataDisk.lun(), dataDisk);
        }
    }
    return Collections.unmodifiableMap(dataDisks);
}

/** Managed data disks keyed by LUN (empty for unmanaged-disk VMs); unmodifiable. (continues on a later line) */
@Override
public Map<Integer, VirtualMachineDataDisk> dataDisks() {
    Map<Integer, VirtualMachineDataDisk> dataDisks = new HashMap<>();
    if (isManagedDiskEnabled()) {
        List<DataDisk> innerDataDisks = this.innerModel().storageProfile().dataDisks();
        if (innerDataDisks != null)
// (continuation) tail of dataDisks(): copy each inner data disk into the result map keyed by LUN.
{
            for (DataDisk innerDataDisk : innerDataDisks) {
                dataDisks.put(innerDataDisk.lun(), new VirtualMachineDataDiskImpl(innerDataDisk));
            }
        }
    }
    return Collections.unmodifiableMap(dataDisks);
}

/** Gets the primary network interface (blocking). */
@Override
public NetworkInterface getPrimaryNetworkInterface() {
    return this.getPrimaryNetworkInterfaceAsync().block();
}

/** Gets the primary network interface asynchronously. */
@Override
public Mono<NetworkInterface> getPrimaryNetworkInterfaceAsync() {
    return this.networkManager.networkInterfaces().getByIdAsync(primaryNetworkInterfaceId());
}

/** Gets the public IP address of the primary NIC's primary IP configuration (blocking). */
@Override
public PublicIpAddress getPrimaryPublicIPAddress() {
    return this.getPrimaryNetworkInterface().primaryIPConfiguration().getPublicIpAddress();
}

/** Gets the public IP address id of the primary NIC's primary IP configuration (blocking). */
@Override
public String getPrimaryPublicIPAddressId() {
    return this.getPrimaryNetworkInterface().primaryIPConfiguration().publicIpAddressId();
}

/** Ids of all network interfaces attached to this VM. */
@Override
public List<String> networkInterfaceIds() {
    List<String> nicIds = new ArrayList<>();
    for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) {
        nicIds.add(nicRef.id());
    }
    return nicIds;
}

/** Resolves the primary NIC id: a single NIC wins, else the one flagged primary, else the first. */
@Override
public String primaryNetworkInterfaceId() {
    final List<NetworkInterfaceReference> nicRefs = this.innerModel().networkProfile().networkInterfaces();
    String primaryNicRefId = null;
    if (nicRefs.size() == 1) {
        primaryNicRefId = nicRefs.get(0).id();
    } else if (nicRefs.size() == 0) {
        primaryNicRefId = null;
    } else {
        for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) {
            if (nicRef.primary() != null && nicRef.primary()) {
                primaryNicRefId = nicRef.id();
                break;
            }
        }
        // No NIC flagged primary: fall back to the first reference.
        if (primaryNicRefId == null) {
            primaryNicRefId = nicRefs.get(0).id();
        }
    }
    return primaryNicRefId;
}

/** Id of the associated availability set, or null. */
@Override
public String availabilitySetId() {
    if (innerModel().availabilitySet() != null) {
        return innerModel().availabilitySet().id();
    }
    return null;
}

/** Id of the owning virtual machine scale set, or null. */
@Override
public String virtualMachineScaleSetId() {
    if (innerModel().virtualMachineScaleSet() != null) {
        return innerModel().virtualMachineScaleSet().id();
    }
    return null;
}

/** Provisioning state reported by the service. */
@Override
public String provisioningState() {
    return innerModel().provisioningState();
}

/** License type, or null. */
@Override
public String licenseType() {
    return innerModel().licenseType();
}

/** Fetches the associated proximity placement group resource, or null when none is associated/found. */
@Override
public ProximityPlacementGroup proximityPlacementGroup() {
    if (innerModel().proximityPlacementGroup() == null) {
        return null;
    } else {
        ResourceId id = ResourceId.fromString(innerModel().proximityPlacementGroup().id());
        ProximityPlacementGroupInner plgInner =
            manager()
                .serviceClient()
                .getProximityPlacementGroups()
                .getByResourceGroup(id.resourceGroupName(), id.name());
        if (plgInner == null) {
            return null;
        } else {
            return new ProximityPlacementGroupImpl(plgInner);
        }
    }
}

/** Lists extensions asynchronously. */
@Override
public Mono<List<VirtualMachineExtension>> listExtensionsAsync() {
    return this.virtualMachineExtensions.listAsync();
}

/** Lists extensions keyed by name. */
@Override
public Map<String, VirtualMachineExtension> listExtensions() {
    return this.virtualMachineExtensions.asMap();
}

/** Purchase plan from the inner model. */
@Override
public Plan plan() {
    return innerModel().plan();
}

/** Storage profile from the inner model. */
@Override
public StorageProfile storageProfile() {
    return innerModel().storageProfile();
}

/** OS profile from the inner model. */
@Override
public OSProfile osProfile() {
    return innerModel().osProfile();
}

/** Diagnostics profile from the inner model. */
@Override
public DiagnosticsProfile diagnosticsProfile() {
    return innerModel().diagnosticsProfile();
}

/** Unique VM id. */
@Override
public String vmId() {
    return innerModel().vmId();
}

/** Instance view, lazily refreshed on first access. */
@Override
public VirtualMachineInstanceView instanceView() {
    if (this.virtualMachineInstanceView == null) {
        this.refreshInstanceView();
    }
    return this.virtualMachineInstanceView;
}

/** Availability zones of the VM as an unmodifiable set. */
@Override
public Set<AvailabilityZoneId> availabilityZones() {
    Set<AvailabilityZoneId> zones = new HashSet<>();
    if (this.innerModel().zones() != null) {
        for (String zone : this.innerModel().zones()) {
            zones.add(AvailabilityZoneId.fromString(zone));
        }
    }
    return Collections.unmodifiableSet(zones);
}

/** Power state derived from the instance view (may trigger a refresh). */
@Override
public PowerState powerState() {
    return PowerState.fromInstanceView(this.instanceView());
}

/** Whether boot diagnostics is enabled. */
@Override
public boolean isBootDiagnosticsEnabled() {
    return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled();
}

/** Boot diagnostics storage URI, if any. */
@Override
public String bootDiagnosticsStorageUri() {
    return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri();
}

/** Whether any managed service identity (system or user assigned) is enabled. */
@Override
public boolean isManagedServiceIdentityEnabled() {
    ResourceIdentityType type = this.managedServiceIdentityType();
    return type != null && !type.equals(ResourceIdentityType.NONE);
}

/** Tenant id of the system-assigned identity, or null. */
@Override
public String systemAssignedManagedServiceIdentityTenantId() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().tenantId();
    }
    return null;
}

/** Principal id of the system-assigned identity, or null. */
@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().principalId();
    }
    return null;
}

/** The configured identity type, or null when no identity block exists. */
@Override
public ResourceIdentityType managedServiceIdentityType() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().type();
    }
    return null;
}

/** Ids of user-assigned identities as an unmodifiable set (empty when none). */
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
    if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) {
        return Collections
            .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
    }
    return Collections.unmodifiableSet(new HashSet<String>());
}

/** Billing profile (max price) for spot/low-priority VMs. */
@Override
public BillingProfile billingProfile() {
    return this.innerModel().billingProfile();
}

/** Whether hibernation is enabled via additional capabilities. */
@Override
public boolean isHibernationEnabled() {
    return this.innerModel().additionalCapabilities() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().additionalCapabilities().hibernationEnabled());
}

/** Security type from the security profile, or null when no profile exists. */
@Override
public SecurityTypes securityType() {
    SecurityProfile securityProfile = this.innerModel().securityProfile();
    if (securityProfile == null) {
        return null;
    }
    return securityProfile.securityType();
}

/** Whether secure boot is enabled (requires a security type and UEFI settings). */
@Override
public boolean isSecureBootEnabled() {
    return securityType() != null
        && this.innerModel().securityProfile().uefiSettings() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().secureBootEnabled());
}

/** Whether vTPM is enabled (requires a security type and UEFI settings). */
@Override
public boolean isVTpmEnabled() {
    return securityType() != null
        && this.innerModel().securityProfile().uefiSettings() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().vTpmEnabled());
}

/** Creation timestamp reported by the service. */
@Override
public OffsetDateTime timeCreated() {
    return innerModel().timeCreated();
}

/** Delete option recorded for the primary NIC, or null. */
@Override
public DeleteOptions primaryNetworkInterfaceDeleteOptions() {
    String nicId = primaryNetworkInterfaceId();
    return networkInterfaceDeleteOptions(nicId);
}

/** Delete option of the NIC with the given id, or null when it cannot be determined. */
@Override
public DeleteOptions networkInterfaceDeleteOptions(String networkInterfaceId) {
    if (CoreUtils.isNullOrEmpty(networkInterfaceId)
        || this.innerModel().networkProfile() == null
        || this.innerModel().networkProfile().networkInterfaces() == null) {
        return null;
    }
    return this.innerModel().networkProfile()
        .networkInterfaces()
        .stream()
        .filter(nic -> networkInterfaceId.equalsIgnoreCase(nic.id()))
        .findAny()
        .map(NetworkInterfaceReference::deleteOption)
        .orElse(null);
}

/** VM priority from the inner model. */
@Override
public VirtualMachinePriorityTypes priority() {
    return this.innerModel().priority();
}

/** Eviction policy from the inner model. */
@Override
public VirtualMachineEvictionPolicyTypes evictionPolicy() {
    return this.innerModel().evictionPolicy();
}

/** User data from the inner model. */
@Override
public String userData() {
    return this.innerModel().userData();
}

/**
 * Pre-create hook: implicitly defines a storage account when unmanaged OS/data disks will need one
 * and none was supplied, then lets the boot-diagnostics handler prepare. (continues on a later line)
 */
@Override
public void beforeGroupCreateOrUpdate() {
    if (creatableStorageAccountKey == null && existingStorageAccountToAssociate == null) {
        if (osDiskRequiresImplicitStorageAccountCreation() || dataDisksRequiresImplicitStorageAccountCreation()) {
            Creatable<StorageAccount> storageAccountCreatable = null;
            if (this.creatableGroup != null) {
                storageAccountCreatable =
                    this
                        .storageManager
                        .storageAccounts()
                        .define(this.namer.getRandomName("stg", 24).replace("-", ""))
                        .withRegion(this.regionName())
                        .withNewResourceGroup(this.creatableGroup);
            } else {
                storageAccountCreatable =
                    this
                        .storageManager
                        .storageAccounts()
                        .define(this.namer.getRandomName("stg", 24).replace("-", ""))
                        .withRegion(this.regionName())
                        .withExistingResourceGroup(this.resourceGroupName());
            }
// (continuation) tail of beforeGroupCreateOrUpdate(): register the implicit storage account dependency.
this.creatableStorageAccountKey = this.addDependency(storageAccountCreatable);
        }
    }
    this.bootDiagnosticsHandler.prepare();
}

/** Creates (or updates) the VM resource and resets this wrapper from the service response. */
@Override
public Mono<VirtualMachine> createResourceAsync() {
    return prepareCreateResourceAsync()
        .flatMap(
            virtualMachine ->
                this
                    .manager()
                    .serviceClient()
                    .getVirtualMachines()
                    .createOrUpdateAsync(resourceGroupName(), vmName, innerModel())
                    .map(
                        virtualMachineInner -> {
                            reset(virtualMachineInner);
                            return this;
                        }));
}

/** Applies all client-side defaults and handler wiring before the create call is issued. */
private Mono<VirtualMachine> prepareCreateResourceAsync() {
    setOSDiskDefaults();
    setOSProfileDefaults();
    setHardwareProfileDefaults();
    if (isManagedDiskEnabled()) {
        managedDataDisks.setDataDisksDefaults();
    } else {
        UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName);
    }
    this.handleUnManagedOSAndDataDisksStorageSettings();
    this.bootDiagnosticsHandler.handleDiagnosticsSettings();
    this.handleNetworkSettings();
    // Availability-set and identity handling is deferred until the PPG (if any) has been created.
    return this
        .createNewProximityPlacementGroupAsync()
        .map(
            virtualMachine -> {
                this.handleAvailabilitySettings();
                this.virtualMachineMsiHandler.processCreatedExternalIdentities();
                this.virtualMachineMsiHandler.handleExternalIdentities();
                return virtualMachine;
            });
}

/** Builds an update payload and PATCHes the VM only when something actually changed. (continues on a later line) */
@Override
public Mono<VirtualMachine> updateResourceAsync() {
    if (isManagedDiskEnabled()) {
        managedDataDisks.setDataDisksDefaults();
    } else {
        UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName);
    }
    this.handleUnManagedOSAndDataDisksStorageSettings();
    this.bootDiagnosticsHandler.handleDiagnosticsSettings();
    this.handleNetworkSettings();
    this.handleAvailabilitySettings();
    this.virtualMachineMsiHandler.processCreatedExternalIdentities();
    VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();
    this.copyInnerToUpdateParameter(updateParameter);
    this.virtualMachineMsiHandler.handleExternalIdentities(updateParameter);
    final boolean vmModified = this.isVirtualMachineModifiedDuringUpdate(updateParameter);
    if (vmModified) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
.updateAsync(resourceGroupName(), vmName, updateParameter) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; }); } else { return Mono.just(this); } } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { this.virtualMachineExtensions.clear(); if (isGroupFaulted) { return Mono.empty(); } else { return this.refreshAsync().then(); } } VirtualMachineImpl withExtension(VirtualMachineExtensionImpl extension) { this.virtualMachineExtensions.addExtension(extension); return this; } private void reset(VirtualMachineInner inner) { this.setInner(inner); clearCachedRelatedResources(); initializeDataDisks(); virtualMachineMsiHandler.clear(); creatableSecondaryNetworkInterfaceKeys.clear(); existingSecondaryNetworkInterfacesToAssociate.clear(); secondaryNetworkInterfaceDeleteOptions.clear(); primaryNetworkInterfaceDeleteOptions = null; } VirtualMachineImpl withUnmanagedDataDisk(UnmanagedDataDiskImpl dataDisk) { this.innerModel().storageProfile().dataDisks().add(dataDisk.innerModel()); this.unmanagedDataDisks.add(dataDisk); return this; } @Override public VirtualMachineImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (isInCreateMode()) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<String>()); } this.innerModel().zones().add(zoneId.toString()); if (this.implicitPipCreatable != null) { this.implicitPipCreatable .withAvailabilityZone(zoneId) .withSku(PublicIPSkuType.STANDARD) .withStaticIP(); } } return this; } @Override public VirtualMachineImpl withOsDiskDeleteOptions(DeleteOptions deleteOptions) { if (deleteOptions == null || this.innerModel().storageProfile() == null || this.innerModel().storageProfile().osDisk() == null) { return null; } this.innerModel().storageProfile().osDisk().withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); return this; } @Override public VirtualMachineImpl withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions deleteOptions) { 
// (continuation) tail of withPrimaryNetworkInterfaceDeleteOptions(), whose signature is on the previous line.
this.primaryNetworkInterfaceDeleteOptions = deleteOptions;
    return this;
}

/** Applies a delete option to the NICs with the given ids (case-insensitive match). */
@Override
public VirtualMachineImpl withNetworkInterfacesDeleteOptions(DeleteOptions deleteOptions, String... nicIds) {
    if (nicIds == null || nicIds.length == 0) {
        throw new IllegalArgumentException("No nicIds specified for `withNetworkInterfacesDeleteOptions`");
    }
    if (this.innerModel().networkProfile() != null
        && this.innerModel().networkProfile().networkInterfaces() != null) {
        Set<String> nicIdSet =
            Arrays.stream(nicIds).map(nicId -> nicId.toLowerCase(Locale.ROOT)).collect(Collectors.toSet());
        this.innerModel().networkProfile().networkInterfaces().forEach(
            nic -> {
                if (nicIdSet.contains(nic.id().toLowerCase(Locale.ROOT))) {
                    nic.withDeleteOption(deleteOptions);
                }
            }
        );
    }
    return this;
}

/** Applies a delete option to the data disks at the given LUNs; null LUNs are ignored. */
@Override
public VirtualMachineImpl withDataDisksDeleteOptions(DeleteOptions deleteOptions, Integer... luns) {
    if (luns == null || luns.length == 0) {
        throw new IllegalArgumentException("No luns specified for `withDataDisksDeleteOptions`");
    }
    Set<Integer> lunSet = Arrays.stream(luns).filter(Objects::nonNull).collect(Collectors.toSet());
    if (lunSet.isEmpty()) {
        throw new IllegalArgumentException("No non-null luns specified for `withDataDisksDeleteOptions`");
    }
    if (this.innerModel().storageProfile() != null && this.innerModel().storageProfile().dataDisks() != null) {
        this.innerModel().storageProfile().dataDisks().forEach(
            dataDisk -> {
                if (lunSet.contains(dataDisk.lun())) {
                    dataDisk.withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions));
                }
            }
        );
    }
    return this;
}

/** The Azure environment of this VM's manager. */
AzureEnvironment environment() {
    return manager().environment();
}

/**
 * Applies OS disk defaults at create time: managed disks get a storage account type and no VHD,
 * unmanaged disks get a generated VHD location/name, and caching defaults to READ_WRITE.
 */
private void setOSDiskDefaults() {
    if (isInUpdateMode()) {
        return;
    }
    StorageProfile storageProfile = this.innerModel().storageProfile();
    OSDisk osDisk = storageProfile.osDisk();
    if (isOSDiskFromImage(osDisk)) {
        if (isManagedDiskEnabled()) {
            // Default to Standard LRS unless the caller already chose a storage account type.
            if (osDisk.managedDisk() == null) {
                osDisk.withManagedDisk(new ManagedDiskParameters());
            }
            if (osDisk.managedDisk().storageAccountType() == null) {
                osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS);
            }
            osDisk.withVhd(null);
        } else {
            if (isOSDiskFromPlatformImage(storageProfile) || isOSDiskFromStoredImage(storageProfile)) {
                if (osDisk.vhd() == null) {
                    String osDiskVhdContainerName = "vhds";
                    String osDiskVhdName = this.vmName + "-os-disk-" + UUID.randomUUID().toString() + ".vhd";
                    withOSDiskVhdLocation(osDiskVhdContainerName, osDiskVhdName);
                }
                osDisk.withManagedDisk(null);
            }
            if (osDisk.name() == null) {
                withOSDiskName(this.vmName + "-os-disk");
            }
        }
    } else {
        // OS disk not sourced from an image: clear fields that do not apply.
        if (isManagedDiskEnabled()) {
            if (osDisk.managedDisk() != null) {
                osDisk.managedDisk().withStorageAccountType(null);
            }
            osDisk.withVhd(null);
        } else {
            osDisk.withManagedDisk(null);
            if (osDisk.name() == null) {
                withOSDiskName(this.vmName + "-os-disk");
            }
        }
    }
    if (osDisk.caching() == null) {
        withOSDiskCaching(CachingTypes.READ_WRITE);
    }
}

/**
 * Applies OS profile defaults at create time: the Linux password-auth flag and a valid computer name;
 * drops the OS profile entirely when the OS disk is not sourced from an image.
 */
private void setOSProfileDefaults() {
    if (isInUpdateMode()) {
        return;
    }
    StorageProfile storageProfile = this.innerModel().storageProfile();
    OSDisk osDisk = storageProfile.osDisk();
    if (!removeOsProfile && isOSDiskFromImage(osDisk)) {
        if (osDisk.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) {
            OSProfile osProfile = this.innerModel().osProfile();
            if (osProfile.linuxConfiguration() == null) {
                osProfile.withLinuxConfiguration(new LinuxConfiguration());
            }
            // Password auth is disabled exactly when no admin password was supplied.
            this
                .innerModel()
                .osProfile()
                .linuxConfiguration()
                .withDisablePasswordAuthentication(osProfile.adminPassword() == null);
        }
        if (this.innerModel().osProfile().computerName() == null) {
            // All-digit names and names longer than 15 chars are replaced with a generated name.
            if (vmName.matches("[0-9]+")) {
                this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15));
            } else if (vmName.length() <= 15) {
                this.innerModel().osProfile().withComputerName(vmName);
            } else {
                this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15));
            }
        }
    } else {
        this.innerModel().withOsProfile(null);
    }
}

/** Defaults the VM size to BASIC_A0 when none was chosen (create mode only). */
private void setHardwareProfileDefaults() {
    if (!isInCreateMode()) {
        return;
    }
    HardwareProfile hardwareProfile = this.innerModel().hardwareProfile();
    if (hardwareProfile.vmSize() == null) {
        hardwareProfile.withVmSize(VirtualMachineSizeTypes.BASIC_A0);
    }
}

/** Prepare virtual machine disks profile (StorageProfile) for unmanaged OS/data disks. */
private void handleUnManagedOSAndDataDisksStorageSettings() {
    if (isManagedDiskEnabled()) {
        return;
    }
    StorageAccount storageAccount = null;
    if (this.creatableStorageAccountKey != null) {
        storageAccount = this.taskResult(this.creatableStorageAccountKey);
    } else if (this.existingStorageAccountToAssociate != null) {
        storageAccount = this.existingStorageAccountToAssociate;
    }
    if (isInCreateMode()) {
        if (storageAccount != null) {
            if (isOSDiskFromPlatformImage(innerModel().storageProfile())) {
                // Substitute the placeholder with the account's real blob endpoint.
                String uri =
                    innerModel()
                        .storageProfile()
                        .osDisk()
                        .vhd()
                        .uri()
                        .replaceFirst("\\{storage-base-url}", storageAccount.endPoints().primary().blob());
                innerModel().storageProfile().osDisk().vhd().withUri(uri);
            }
            UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName);
        }
    } else {
        if (storageAccount != null) {
            UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName);
        } else {
            UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, vmName);
        }
    }
}

/** Creates the requested proximity placement group (create mode) and records its id on the inner model. */
private Mono<VirtualMachineImpl> createNewProximityPlacementGroupAsync() {
    if (isInCreateMode()) {
        if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) {
            ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner();
            plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType);
            plgInner.withLocation(this.innerModel().location());
            return this
                .manager()
                .serviceClient()
                .getProximityPlacementGroups()
                .createOrUpdateAsync(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner)
                .map(
                    createdPlgInner -> {
                        this
                            .innerModel()
                            .withProximityPlacementGroup(new SubResource().withId(createdPlgInner.id()));
                        return this;
                    });
        }
    }
    return Mono.just(this);
}

/**
 * Wires NIC references into the network profile: the primary NIC (create mode), any recorded
 * delete options, and all newly created or pre-existing secondary NICs.
 */
private void handleNetworkSettings() {
    if (isInCreateMode()) {
        NetworkInterface primaryNetworkInterface = null;
        if (this.creatablePrimaryNetworkInterfaceKey != null) {
            primaryNetworkInterface = this.taskResult(this.creatablePrimaryNetworkInterfaceKey);
        } else if (this.existingPrimaryNetworkInterfaceToAssociate != null) {
            primaryNetworkInterface = this.existingPrimaryNetworkInterfaceToAssociate;
        }
        if (primaryNetworkInterface != null) {
            NetworkInterfaceReference nicReference = new NetworkInterfaceReference();
            nicReference.withPrimary(true);
            nicReference.withId(primaryNetworkInterface.id());
            this.innerModel().networkProfile().networkInterfaces().add(nicReference);
        }
    }
    if (this.primaryNetworkInterfaceDeleteOptions != null) {
        String primaryNetworkInterfaceId = primaryNetworkInterfaceId();
        if (primaryNetworkInterfaceId != null) {
            this.innerModel().networkProfile().networkInterfaces().stream()
                .filter(nic -> primaryNetworkInterfaceId.equals(nic.id()))
                .forEach(nic -> nic.withDeleteOption(this.primaryNetworkInterfaceDeleteOptions));
        }
    }
    for (String creatableSecondaryNetworkInterfaceKey : this.creatableSecondaryNetworkInterfaceKeys) {
        NetworkInterface secondaryNetworkInterface = this.taskResult(creatableSecondaryNetworkInterfaceKey);
        NetworkInterfaceReference nicReference = new NetworkInterfaceReference();
        nicReference.withPrimary(false);
        nicReference.withId(secondaryNetworkInterface.id());
        if (secondaryNetworkInterfaceDeleteOptions.containsKey(creatableSecondaryNetworkInterfaceKey)) {
            DeleteOptions deleteOptions =
                secondaryNetworkInterfaceDeleteOptions.get(creatableSecondaryNetworkInterfaceKey);
            nicReference.withDeleteOption(deleteOptions);
        }
        this.innerModel().networkProfile().networkInterfaces().add(nicReference);
    }
    for (NetworkInterface secondaryNetworkInterface : this.existingSecondaryNetworkInterfacesToAssociate) {
        NetworkInterfaceReference nicReference = new NetworkInterfaceReference();
        nicReference.withPrimary(false);
        nicReference.withId(secondaryNetworkInterface.id());
        this.innerModel().networkProfile().networkInterfaces().add(nicReference);
    }
}

/** Associates the chosen availability set with the inner model (create mode only). */
private void handleAvailabilitySettings() {
    if (!isInCreateMode()) {
        return;
    }
    AvailabilitySet availabilitySet = null;
    if (this.creatableAvailabilitySetKey != null) {
        availabilitySet = this.taskResult(this.creatableAvailabilitySetKey);
    } else if (this.existingAvailabilitySetToAssociate != null) {
        availabilitySet = this.existingAvailabilitySetToAssociate;
    }
    if (availabilitySet != null) {
        if (this.innerModel().availabilitySet() == null) {
            this.innerModel().withAvailabilitySet(new SubResource());
        }
        this.innerModel().availabilitySet().withId(availabilitySet.id());
    }
}

/** Whether an implicit storage account is needed for an unmanaged OS disk created from a platform image. */
private boolean osDiskRequiresImplicitStorageAccountCreation() {
    if (isManagedDiskEnabled()) {
        return false;
    }
    if (this.creatableStorageAccountKey != null
        || this.existingStorageAccountToAssociate != null
        || !isInCreateMode()) {
        return false;
    }
    return isOSDiskFromPlatformImage(this.innerModel().storageProfile());
}

/** Whether an implicit storage account is needed for unmanaged data disks that lack an explicit VHD. */
private boolean dataDisksRequiresImplicitStorageAccountCreation() {
    if (isManagedDiskEnabled()) {
        return false;
    }
    if (this.creatableStorageAccountKey != null
        || this.existingStorageAccountToAssociate != null
        || this.unmanagedDataDisks.size() == 0) {
        return false;
    }
    boolean hasEmptyVhd = false;
    for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
        if (dataDisk.creationMethod() == DiskCreateOptionTypes.EMPTY
            || dataDisk.creationMethod() == DiskCreateOptionTypes.FROM_IMAGE) {
            if (dataDisk.innerModel().vhd() == null) {
                hasEmptyVhd = true;
                break;
            }
        }
    }
    if (isInCreateMode()) {
        return hasEmptyVhd;
    }
    if (hasEmptyVhd) {
        // Update mode: an ATTACH-mode disk that already has a VHD avoids the implicit account
        // — presumably its existing storage account can be reused; confirm against callers.
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            if (dataDisk.creationMethod() == DiskCreateOptionTypes.ATTACH && dataDisk.innerModel().vhd() != null) {
                return false;
            }
        }
        return true;
    }
    return false;
}

/** * Checks whether the OS disk is directly attached to a unmanaged VHD.
* * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a unmanaged VHD, false otherwise */ private boolean isOSDiskAttachedUnmanaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.vhd() != null && osDisk.vhd().uri() != null; } /** * Checks whether the OS disk is directly attached to a managed disk. * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a managed disk, false otherwise */ private boolean isOSDiskAttachedManaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.managedDisk() != null && osDisk.managedDisk().id() != null; } /** * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]). * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is configured to use image from PIR or custom image */ private boolean isOSDiskFromImage(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE; } /** * Checks whether the OS disk is based on an platform image (image in PIR). * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on platform image. */ private boolean isOSDiskFromPlatformImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.publisher() != null && imageReference.offer() != null && imageReference.sku() != null && imageReference.version() != null; } /** * Checks whether the OS disk is based on a CustomImage. * * <p>A custom image is represented by {@link VirtualMachineCustomImage}. * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on custom image. 
*/ private boolean isOsDiskFromCustomImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null; } /** * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature'). * * <p>A stored image is created by calling {@link VirtualMachine * * @param storageProfile the storage profile * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature') */ private boolean isOSDiskFromStoredImage(StorageProfile storageProfile) { OSDisk osDisk = storageProfile.osDisk(); return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null; } private String temporaryBlobUrl(String containerName, String blobName) { return "{storage-base-url}" + containerName + "/" + blobName; } private NetworkInterface.DefinitionStages.WithPrimaryPublicIPAddress prepareNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionWithNetwork; if (this.creatableGroup != null) { definitionWithNetwork = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithNetwork = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionWithNetwork.withNewPrimaryNetwork("vnet" + name).withPrimaryPrivateIPAddressDynamic(); } private void initializeDataDisks() { if (this.innerModel().storageProfile().dataDisks() == null) { this.innerModel().storageProfile().withDataDisks(new ArrayList<>()); } this.isUnmanagedDiskSelected = false; this.managedDataDisks.clear(); this.unmanagedDataDisks = new ArrayList<>(); if (!isManagedDiskEnabled()) { for (DataDisk dataDiskInner : this.storageProfile().dataDisks()) { 
// (continuation of initializeDataDisks(): wrap each unmanaged data-disk inner in its fluent impl)
                this.unmanagedDataDisks.add(new UnmanagedDataDiskImpl(dataDiskInner, this));
            }
        }
    }

    /**
     * Starts a primary network-interface definition for this VM: same region, and the VM's
     * creatable resource group when one is pending, otherwise the VM's existing resource group.
     *
     * @param name the name for the new network interface
     * @return the next definition stage, ready for primary-network selection
     */
    private NetworkInterface.DefinitionStages.WithPrimaryNetwork preparePrimaryNetworkInterface(String name) {
        NetworkInterface.DefinitionStages.WithGroup definitionWithGroup =
            this.networkManager.networkInterfaces().define(name).withRegion(this.regionName());
        NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionAfterGroup;
        if (this.creatableGroup != null) {
            definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
        } else {
            definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
        }
        return definitionAfterGroup;
    }

    // Drops the cached instance view so the next read re-fetches it from the service.
    private void clearCachedRelatedResources() {
        this.virtualMachineInstanceView = null;
    }

    /**
     * Guards unmanaged-disk-only operations.
     *
     * @param message the error message for the thrown exception
     * @throws UnsupportedOperationException if this VM uses managed disks
     */
    private void throwIfManagedDiskEnabled(String message) {
        if (this.isManagedDiskEnabled()) {
            throw logger.logExceptionAsError(new UnsupportedOperationException(message));
        }
    }

    /**
     * Guards managed-disk-only operations.
     *
     * @param message the error message for the thrown exception
     * @throws UnsupportedOperationException if this VM does not use managed disks
     */
    private void throwIfManagedDiskDisabled(String message) {
        if (!this.isManagedDiskEnabled()) {
            throw logger.logExceptionAsError(new UnsupportedOperationException(message));
        }
    }

    // Convenience inverse of isInCreateMode().
    private boolean isInUpdateMode() {
        return !this.isInCreateMode();
    }

    /**
     * Converts the fluent {@link DeleteOptions} value to the service's
     * {@link DiskDeleteOptionTypes} by string value (null-safe).
     *
     * @param deleteOptions the fluent delete option, may be null
     * @return the corresponding service enum value, or null when the input is null
     */
    private DiskDeleteOptionTypes diskDeleteOptionsFromDeleteOptions(DeleteOptions deleteOptions) {
        return deleteOptions == null
            ?
// (continuation) second arm of the ternary begun on the previous line: map by string value.
            null : DiskDeleteOptionTypes.fromString(deleteOptions.toString());
    }

    /**
     * Detects whether the VM payload changed between {@link #update()} (where a snapshot is
     * taken) and apply time, by comparing JSON serializations of the snapshot and the current
     * update parameter.
     *
     * @param updateParameter the update payload about to be sent to the service
     * @return true if a change is detected; also true when either side is unavailable or
     *     serialization fails — "modified" is assumed conservatively so an update is never skipped
     */
    boolean isVirtualMachineModifiedDuringUpdate(VirtualMachineUpdateInner updateParameter) {
        if (updateParameterSnapshotOnUpdate == null || updateParameter == null) {
            return true;
        } else {
            try {
                String jsonStrSnapshot =
                    SERIALIZER_ADAPTER.serialize(updateParameterSnapshotOnUpdate, SerializerEncoding.JSON);
                String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON);
                // Both strings come from the same adapter serializing the same type, so plain
                // string equality is a usable change detector here.
                return !jsonStr.equals(jsonStrSnapshot);
            } catch (IOException e) {
                // Serialization failure: err on the side of "modified" rather than dropping an update.
                return true;
            }
        }
    }

    /**
     * Takes a deep copy of the current inner model as a {@link VirtualMachineUpdateInner}
     * snapshot (later consumed by {@link #isVirtualMachineModifiedDuringUpdate}). The deep copy
     * is performed via a JSON serialize/deserialize round-trip.
     *
     * @return the deep-copied update parameter, or {@code null} if the round-trip fails
     *     (callers must tolerate a null snapshot)
     */
    VirtualMachineUpdateInner deepCopyInnerToUpdateParameter() {
        VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();
        copyInnerToUpdateParameter(updateParameter);
        try {
            // serialize + deserialize = deep copy, so later mutations of the live inner model
            // cannot leak into the snapshot through shared sub-object references
            String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON);
            updateParameter =
                SERIALIZER_ADAPTER.deserialize(jsonStr, VirtualMachineUpdateInner.class, SerializerEncoding.JSON);
        } catch (IOException e) {
            return null;
        }
        if (this.innerModel().identity() != null) {
            // Identity is copied manually and only its type is retained — NOTE(review):
            // presumably because principal/tenant ids are service-assigned read-only values;
            // confirm against the service model.
            VirtualMachineIdentity identity = new VirtualMachineIdentity();
            identity.withType(this.innerModel().identity().type());
            updateParameter.withIdentity(identity);
        }
        return updateParameter;
    }

    // Copies the updatable subset of the VM inner model into the update parameter, field by
    // field (shallow copy; deepCopyInnerToUpdateParameter() layers the JSON round-trip on top).
    private void copyInnerToUpdateParameter(VirtualMachineUpdateInner updateParameter) {
        updateParameter.withHardwareProfile(this.innerModel().hardwareProfile());
        updateParameter.withStorageProfile(this.innerModel().storageProfile());
        updateParameter.withOsProfile(this.innerModel().osProfile());
        updateParameter.withNetworkProfile(this.innerModel().networkProfile());
        updateParameter.withDiagnosticsProfile(this.innerModel().diagnosticsProfile());
        updateParameter.withBillingProfile(this.innerModel().billingProfile());
        updateParameter.withSecurityProfile(this.innerModel().securityProfile());
        updateParameter.withAdditionalCapabilities(this.innerModel().additionalCapabilities());
        updateParameter.withAvailabilitySet(this.innerModel().availabilitySet());
updateParameter.withLicenseType(this.innerModel().licenseType()); updateParameter.withZones(this.innerModel().zones()); updateParameter.withTags(this.innerModel().tags()); updateParameter.withProximityPlacementGroup(this.innerModel().proximityPlacementGroup()); updateParameter.withPriority(this.innerModel().priority()); updateParameter.withEvictionPolicy(this.innerModel().evictionPolicy()); updateParameter.withUserData(this.innerModel().userData()); } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } @Override public VirtualMachineImpl withPlacement(DiffDiskPlacement placement) { if (placement != null) { this.innerModel().storageProfile().osDisk().diffDiskSettings().withPlacement(placement); } return this; } @Override public VirtualMachineImpl withExistingVirtualMachineScaleSet(VirtualMachineScaleSet scaleSet) { if (scaleSet != null) { this.innerModel().withVirtualMachineScaleSet(new SubResource().withId(scaleSet.id())); } return this; } @Override public VirtualMachineImpl withOSDisk(String diskId) { if (diskId == null) { return this; } if (!isManagedDiskEnabled() || this.innerModel().storageProfile().osDisk().managedDisk() == null) { return this; } OSDisk osDisk = new OSDisk() .withCreateOption(this.innerModel().storageProfile().osDisk().createOption()); osDisk.withManagedDisk(new ManagedDiskParameters().withId(diskId)); this.storageProfile().withOsDisk(osDisk); this.storageProfile().osDisk().managedDisk().withId(diskId); return this; } @Override public VirtualMachineImpl withOSDisk(Disk disk) { if (disk == null) { return this; } return withOSDisk(disk.id()); } @Override public VirtualMachineImpl withTrustedLaunch() { 
// (continuation of withTrustedLaunch(): select the TrustedLaunch security type)
        ensureSecurityProfile().withSecurityType(SecurityTypes.TRUSTED_LAUNCH);
        return this;
    }

    /** Enables secure boot; no-op unless a security type (e.g. TrustedLaunch) is already set. */
    @Override
    public VirtualMachineImpl withSecureBoot() {
        if (securityType() == null) {
            return this;
        }
        ensureUefiSettings().withSecureBootEnabled(true);
        return this;
    }

    /** Disables secure boot; no-op unless a security type is already set. */
    @Override
    public VirtualMachineImpl withoutSecureBoot() {
        if (securityType() == null) {
            return this;
        }
        ensureUefiSettings().withSecureBootEnabled(false);
        return this;
    }

    /** Enables the virtual TPM; no-op unless a security type is already set. */
    @Override
    public VirtualMachineImpl withVTpm() {
        if (securityType() == null) {
            return this;
        }
        ensureUefiSettings().withVTpmEnabled(true);
        return this;
    }

    /** Disables the virtual TPM; no-op unless a security type is already set. */
    @Override
    public VirtualMachineImpl withoutVTpm() {
        if (securityType() == null) {
            return this;
        }
        ensureUefiSettings().withVTpmEnabled(false);
        return this;
    }

    // Lazily creates (and attaches to the inner model) the SecurityProfile.
    private SecurityProfile ensureSecurityProfile() {
        SecurityProfile securityProfile = this.innerModel().securityProfile();
        if (securityProfile == null) {
            securityProfile = new SecurityProfile();
            this.innerModel().withSecurityProfile(securityProfile);
        }
        return securityProfile;
    }

    // Lazily creates (and attaches) the UefiSettings under the security profile.
    private UefiSettings ensureUefiSettings() {
        UefiSettings uefiSettings = ensureSecurityProfile().uefiSettings();
        if (uefiSettings == null) {
            uefiSettings = new UefiSettings();
            ensureSecurityProfile().withUefiSettings(uefiSettings);
        }
        return uefiSettings;
    }

    /** Class to manage Data disk collection.
*/ private class ManagedDataDiskCollection { private final Map<String, DataDisk> newDisksToAttach = new HashMap<>(); private final List<DataDisk> existingDisksToAttach = new ArrayList<>(); private final List<DataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<DataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineImpl vm; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; private DiskDeleteOptionTypes defaultDeleteOptions; private DiskEncryptionSetParameters defaultDiskEncryptionSet; ManagedDataDiskCollection(VirtualMachineImpl vm) { this.vm = vm; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultDeleteOptions(DiskDeleteOptionTypes deleteOptions) { this.defaultDeleteOptions = deleteOptions; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDefaultEncryptionSet(String diskEncryptionSetId) { this.defaultDiskEncryptionSet = new DiskEncryptionSetParameters().withId(diskEncryptionSetId); } void setDataDisksDefaults() { VirtualMachineInner vmInner = this.vm.innerModel(); if (isPending()) { if (vmInner.storageProfile().dataDisks() == null) { vmInner.storageProfile().withDataDisks(new ArrayList<>()); } List<DataDisk> dataDisks = vmInner.storageProfile().dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksToAttach.values()) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.existingDisksToAttach) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { 
usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setAttachableNewDataDisks(nextLun); setAttachableExistingDataDisks(nextLun); setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (vmInner.storageProfile().dataDisks() != null && vmInner.storageProfile().dataDisks().size() == 0) { if (vm.isInCreateMode()) { vmInner.storageProfile().withDataDisks(null); } } this.clear(); } private void clear() { newDisksToAttach.clear(); existingDisksToAttach.clear(); implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); defaultCachingType = null; defaultStorageAccountType = null; defaultDeleteOptions = null; defaultDiskEncryptionSet = null; } private boolean isPending() { return newDisksToAttach.size() > 0 || existingDisksToAttach.size() > 0 || implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void setDefaultDiskEncryptionSetOptions(DataDisk dataDisk) { if (getDefaultDiskEncryptionSetOptions() != null) { if (dataDisk.managedDisk() != null && dataDisk.managedDisk().diskEncryptionSet() != null) { if (dataDisk.managedDisk().diskEncryptionSet().id() == null) { dataDisk.managedDisk().withDiskEncryptionSet(null); } } else { if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } dataDisk.managedDisk().withDiskEncryptionSet(getDefaultDiskEncryptionSetOptions()); } } } private void setAttachableNewDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Map.Entry<String, DataDisk> entry : this.newDisksToAttach.entrySet()) { Disk managedDisk = 
vm.taskResult(entry.getKey()); DataDisk dataDisk = entry.getValue(); dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } dataDisk.withManagedDisk(new ManagedDiskParameters()); dataDisk.managedDisk().withId(managedDisk.id()); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setAttachableExistingDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.existingDisksToAttach) { dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); 
dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } private DiskDeleteOptionTypes getDefaultDeleteOptions() { return defaultDeleteOptions; } private DiskEncryptionSetParameters getDefaultDiskEncryptionSetOptions() { return defaultDiskEncryptionSet; } } /** Class to manage VM boot diagnostics settings. 
*/ private class BootDiagnosticsHandler { private final VirtualMachineImpl vmImpl; private String creatableDiagnosticsStorageAccountKey; private boolean useManagedStorageAccount = false; BootDiagnosticsHandler(VirtualMachineImpl vmImpl) { this.vmImpl = vmImpl; if (isBootDiagnosticsEnabled() && this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } } public boolean isBootDiagnosticsEnabled() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null && this.vmInner().diagnosticsProfile().bootDiagnostics().enabled() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().enabled(); } return false; } public String bootDiagnosticsStorageUri() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri(); } return null; } BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; } BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(creatable); return this; } BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { this.enableDisable(true); this.useManagedStorageAccount = false; this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(storageAccountBlobEndpointUri); return this; } BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); } BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; } void prepare() { if 
(useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } if (this.creatableDiagnosticsStorageAccountKey != null || this.vmImpl.creatableStorageAccountKey != null || this.vmImpl.existingStorageAccountToAssociate != null) { return; } String accountName = this.vmImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmImpl.creatableGroup != null) { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withNewResourceGroup(this.vmImpl.creatableGroup); } else { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withExistingResourceGroup(this.vmImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(storageAccountCreatable); } void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.vmImpl.creatableStorageAccountKey != null) { storageAccount = 
this.vmImpl.<StorageAccount>taskResult(this.vmImpl.creatableStorageAccountKey); } else if (this.vmImpl.existingStorageAccountToAssociate != null) { storageAccount = this.vmImpl.existingStorageAccountToAssociate; } if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmInner() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); } private VirtualMachineInner vmInner() { return this.vmImpl.innerModel(); } private void enableDisable(boolean enable) { if (this.vmInner().diagnosticsProfile() == null) { this.vmInner().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmInner().diagnosticsProfile().bootDiagnostics() == null) { this.vmInner().diagnosticsProfile().withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
OK — so the generated client now supports etag (If-Match / If-None-Match) parameters on createOrUpdate; the fluent layer below still passes null for both, i.e. etag-based optimistic concurrency is not yet surfaced here.
/**
 * Begins creating the virtual machine, returning an {@link Accepted} wrapper around the
 * long-running create operation (the activation response plus a poller). Dependency resources
 * (NICs, storage accounts, ...) are created first by running this task group's dependency
 * tasks, then the VM createOrUpdate call itself is issued.
 */
public Accepted<VirtualMachine> beginCreate() {
    return AcceptedImpl
        .<VirtualMachine, VirtualMachineInner>newAccepted(
            logger,
            this.manager().serviceClient().getHttpPipeline(),
            this.manager().serviceClient().getDefaultPollInterval(),
            // Activation call: the initial createOrUpdate request. The two trailing nulls are
            // presumably the new If-Match/If-None-Match etag parameters (opting out of etag
            // concurrency control) — NOTE(review): confirm against the generated client.
            () ->
                this
                    .manager()
                    .serviceClient()
                    .getVirtualMachines()
                    .createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null)
                    .block(),
            // Wraps the service inner model back into the fluent VirtualMachine type.
            inner ->
                new VirtualMachineImpl(
                    inner.name(),
                    inner,
                    this.manager(),
                    this.storageManager,
                    this.networkManager,
                    this.authorizationManager),
            VirtualMachineInner.class,
            // Pre-activation step: create dependency resources, then finish local defaulting.
            // Blocking is intentional — beginCreate() is the synchronous LRO entry point.
            () -> {
                Flux<Indexable> dependencyTasksAsync =
                    taskGroup().invokeDependencyAsync(taskGroup().newInvocationContext());
                dependencyTasksAsync.blockLast();
                prepareCreateResourceAsync().block();
            },
            this::reset,
            Context.NONE);
}
.createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null)
/**
 * Begins creating the virtual machine, returning an {@link Accepted} wrapper around the
 * long-running create operation (the activation response plus a poller). Dependency resources
 * (NICs, storage accounts, ...) are created first by running this task group's dependency
 * tasks, then the VM createOrUpdate call itself is issued.
 */
public Accepted<VirtualMachine> beginCreate() {
    return AcceptedImpl
        .<VirtualMachine, VirtualMachineInner>newAccepted(
            logger,
            this.manager().serviceClient().getHttpPipeline(),
            this.manager().serviceClient().getDefaultPollInterval(),
            // Activation call: the initial createOrUpdate request. The two trailing nulls are
            // presumably the new If-Match/If-None-Match etag parameters (opting out of etag
            // concurrency control) — NOTE(review): confirm against the generated client.
            () ->
                this
                    .manager()
                    .serviceClient()
                    .getVirtualMachines()
                    .createOrUpdateWithResponseAsync(resourceGroupName(), vmName, innerModel(), null, null)
                    .block(),
            // Wraps the service inner model back into the fluent VirtualMachine type.
            inner ->
                new VirtualMachineImpl(
                    inner.name(),
                    inner,
                    this.manager(),
                    this.storageManager,
                    this.networkManager,
                    this.authorizationManager),
            VirtualMachineInner.class,
            // Pre-activation step: create dependency resources, then finish local defaulting.
            // Blocking is intentional — beginCreate() is the synchronous LRO entry point.
            () -> {
                Flux<Indexable> dependencyTasksAsync =
                    taskGroup().invokeDependencyAsync(taskGroup().newInvocationContext());
                dependencyTasksAsync.blockLast();
                prepareCreateResourceAsync().block();
            },
            this::reset,
            Context.NONE);
}
// Fluent-model implementation of an Azure virtual machine, layering the definition and
// update stage interfaces over the service's VirtualMachineInner payload.
class VirtualMachineImpl extends GroupableResourceImpl<VirtualMachine, VirtualMachineInner, VirtualMachineImpl, ComputeManager>
    implements VirtualMachine,
    VirtualMachine.DefinitionManagedOrUnmanaged,
    VirtualMachine.DefinitionManaged,
    VirtualMachine.DefinitionUnmanaged,
    VirtualMachine.Update,
    VirtualMachine.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate,
    VirtualMachine.UpdateStages.WithSystemAssignedIdentityBasedAccessOrUpdate {
    private final ClientLogger logger = new ClientLogger(VirtualMachineImpl.class);
    // Sibling managers used to create/resolve dependent resources (storage, network, RBAC).
    private final StorageManager storageManager;
    private final NetworkManager networkManager;
    private final AuthorizationManager authorizationManager;
    private final String vmName;
    // Generates unique names for implicitly-created child resources (NICs, PIPs, ...).
    private final IdentifierProvider namer;
    // Keys of "creatable" dependencies registered with the task group; resolved at create time.
    private String creatableStorageAccountKey;
    private String creatableAvailabilitySetKey;
    private String creatablePrimaryNetworkInterfaceKey;
    private List<String> creatableSecondaryNetworkInterfaceKeys;
    // Existing resources to associate (alternatives to the creatables above).
    private StorageAccount existingStorageAccountToAssociate;
    private AvailabilitySet existingAvailabilitySetToAssociate;
    private NetworkInterface existingPrimaryNetworkInterfaceToAssociate;
    private List<NetworkInterface> existingSecondaryNetworkInterfacesToAssociate;
    // Cached instance view from the last refreshInstanceView() call (null until fetched).
    private VirtualMachineInstanceView virtualMachineInstanceView;
    private boolean isMarketplaceLinuxImage;
    // Partially-built primary NIC definition, advanced stage-by-stage by the with* methods.
    private NetworkInterface.DefinitionStages.WithPrimaryPrivateIP nicDefinitionWithPrivateIp;
    private NetworkInterface.DefinitionStages.WithPrimaryNetworkSubnet nicDefinitionWithSubnet;
    private NetworkInterface.DefinitionStages.WithCreate nicDefinitionWithCreate;
    private VirtualMachineExtensionsImpl virtualMachineExtensions;
    private boolean isUnmanagedDiskSelected;
    private List<VirtualMachineUnmanagedDataDisk> unmanagedDataDisks;
    private final ManagedDataDiskCollection managedDataDisks;
    private final BootDiagnosticsHandler bootDiagnosticsHandler;
    private VirtualMachineMsiHandler virtualMachineMsiHandler;
    // Implicitly-created public IP definition (used by withNewPrimaryPublicIPAddress).
    private PublicIpAddress.DefinitionStages.WithCreate implicitPipCreatable;
    private String newProximityPlacementGroupName;
    private ProximityPlacementGroupType newProximityPlacementGroupType;
    // When true, the OS profile is stripped before create (specialized images carry their own).
    private boolean removeOsProfile;
    private DeleteOptions primaryNetworkInterfaceDeleteOptions;
    private final Map<String, DeleteOptions> secondaryNetworkInterfaceDeleteOptions = new HashMap<>();
    // Snapshot of the update payload taken when update() starts; used to detect changes.
    private VirtualMachineUpdateInner updateParameterSnapshotOnUpdate;
    private static final SerializerAdapter SERIALIZER_ADAPTER =
        SerializerFactory.createDefaultManagementSerializerAdapter();

    // Wires the inner model to this fluent wrapper and initializes per-instance helpers.
    VirtualMachineImpl(
        String name,
        VirtualMachineInner innerModel,
        final ComputeManager computeManager,
        final StorageManager storageManager,
        final NetworkManager networkManager,
        final AuthorizationManager authorizationManager) {
        super(name, innerModel, computeManager);
        this.storageManager = storageManager;
        this.networkManager = networkManager;
        this.authorizationManager = authorizationManager;
        this.vmName = name;
        this.isMarketplaceLinuxImage = false;
        this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.vmName);
        this.creatableSecondaryNetworkInterfaceKeys = new ArrayList<>();
        this.existingSecondaryNetworkInterfacesToAssociate = new ArrayList<>();
        this.virtualMachineExtensions =
            new VirtualMachineExtensionsImpl(computeManager.serviceClient().getVirtualMachineExtensions(), this);
        this.managedDataDisks = new ManagedDataDiskCollection(this);
        initializeDataDisks();
        this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this);
        this.virtualMachineMsiHandler = new VirtualMachineMsiHandler(authorizationManager, this);
        this.newProximityPlacementGroupName = null;
        this.newProximityPlacementGroupType = null;
    }

    // Enters update mode, first snapshotting the current state for later change detection.
    @Override
    public VirtualMachineImpl update() {
        updateParameterSnapshotOnUpdate = this.deepCopyInnerToUpdateParameter();
        return super.update();
    }

    // Refreshes the VM from the service, re-syncing local state and the extensions cache.
    @Override
    public Mono<VirtualMachine> refreshAsync() {
        return super
            .refreshAsync()
            .map(
                virtualMachine -> {
                    reset(virtualMachine.innerModel());
                    virtualMachineExtensions.refresh();
                    return virtualMachine;
});
    }

    // Fetches the current inner model from the service.
    @Override
    protected Mono<VirtualMachineInner> getInnerAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .getByResourceGroupAsync(this.resourceGroupName(), this.name());
    }

    // --- Power-management operations; each blocking variant blocks on its async twin ---
    @Override
    public void deallocate() {
        this.deallocateAsync().block();
    }

    @Override
    public Mono<Void> deallocateAsync() {
        // NOTE(review): the service call yields Mono<Void>, which completes empty, so the
        // .map(aVoid -> this.refreshAsync()) mapper never runs (and the refresh Mono it
        // would build is never subscribed). The local model is therefore not refreshed
        // after deallocation — confirm whether a flatMap-style refresh was intended.
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .deallocateAsync(this.resourceGroupName(), this.name())
            .map(aVoid -> this.refreshAsync())
            .then();
    }

    @Override
    public void deallocate(boolean hibernate) {
        this.deallocateAsync(hibernate).block();
    }

    // Deallocates with the hibernate flag; same NOTE as deallocateAsync() above applies
    // to the map step here.
    @Override
    public Mono<Void> deallocateAsync(boolean hibernate) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .deallocateAsync(this.resourceGroupName(), this.name(), hibernate)
            .map(aVoid -> this.refreshAsync())
            .then();
    }

    @Override
    public void generalize() {
        this.generalizeAsync().block();
    }

    @Override
    public Mono<Void> generalizeAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .generalizeAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void powerOff() {
        this.powerOffAsync().block();
    }

    @Override
    public Mono<Void> powerOffAsync() {
        // Passes null for skipShutdown, i.e. the service-side default behavior.
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .powerOffAsync(this.resourceGroupName(), this.name(), null);
    }

    @Override
    public void powerOff(boolean skipShutdown) {
        this.powerOffAsync(skipShutdown).block();
    }

    @Override
    public Mono<Void> powerOffAsync(boolean skipShutdown) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .powerOffAsync(this.resourceGroupName(), this.name(), skipShutdown);
    }

    @Override
    public void restart() {
        this.restartAsync().block();
    }

    @Override
    public Mono<Void> restartAsync() {
        return this.manager().serviceClient().getVirtualMachines().restartAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void start() {
        this.startAsync().block();
    }

    @Override
    public Mono<Void> startAsync() {
        return
            this.manager().serviceClient().getVirtualMachines().startAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void redeploy() {
        this.redeployAsync().block();
    }

    @Override
    public Mono<Void> redeployAsync() {
        return this.manager().serviceClient().getVirtualMachines().redeployAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void simulateEviction() {
        this.simulateEvictionAsync().block();
    }

    @Override
    public Mono<Void> simulateEvictionAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .simulateEvictionAsync(this.resourceGroupName(), this.name());
    }

    // Migrates the VM's unmanaged (VHD) disks to managed disks, then refreshes local state.
    @Override
    public void convertToManaged() {
        this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .convertToManagedDisks(this.resourceGroupName(), this.name());
        this.refresh();
    }

    @Override
    public Mono<Void> convertToManagedAsync() {
        // NOTE(review): convertToManagedDisksAsync returns Mono<Void> (completes empty),
        // so the flatMap(aVoid -> refreshAsync()) step is presumably never invoked — confirm.
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .convertToManagedDisksAsync(this.resourceGroupName(), this.name())
            .flatMap(aVoid -> refreshAsync())
            .then();
    }

    // Entry point for disk-encryption (ADE) operations on this VM.
    @Override
    public VirtualMachineEncryption diskEncryption() {
        return new VirtualMachineEncryptionImpl(this);
    }

    @Override
    public PagedIterable<VirtualMachineSize> availableSizes() {
        return PagedConverter.mapPage(this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .listAvailableSizes(this.resourceGroupName(), this.name()), VirtualMachineSizeImpl::new);
    }

    // Captures the (generalized) VM into an image template, returned as a JSON string.
    @Override
    public String capture(String containerName, String vhdPrefix, boolean overwriteVhd) {
        return this.captureAsync(containerName, vhdPrefix, overwriteVhd).block();
    }

    @Override
    public Mono<String> captureAsync(String containerName, String vhdPrefix, boolean overwriteVhd) {
        VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
        parameters.withDestinationContainerName(containerName);
        parameters.withOverwriteVhds(overwriteVhd);
        parameters.withVhdPrefix(vhdPrefix);
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .captureAsync(this.resourceGroupName(), this.name(), parameters)
            .map(
captureResultInner -> {
                    // Serialize the capture result to JSON for the caller.
                    try {
                        return SerializerUtils.getObjectMapper().writeValueAsString(captureResultInner);
                    } catch (JsonProcessingException ex) {
                        throw logger.logExceptionAsError(Exceptions.propagate(ex));
                    }
                });
    }

    // Re-fetches the instance view (power state, statuses) and caches it on this object.
    @Override
    public VirtualMachineInstanceView refreshInstanceView() {
        return refreshInstanceViewAsync().block();
    }

    @Override
    public Mono<VirtualMachineInstanceView> refreshInstanceViewAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .getByResourceGroupWithResponseAsync(
                this.resourceGroupName(), this.name(), InstanceViewTypes.INSTANCE_VIEW)
            .map(
                inner -> {
                    virtualMachineInstanceView =
                        new VirtualMachineInstanceViewImpl(inner.getValue().instanceView());
                    return virtualMachineInstanceView;
                })
            .switchIfEmpty(
                Mono
                    .defer(
                        () -> {
                            // Clear the cached view when the VM could not be retrieved.
                            virtualMachineInstanceView = null;
                            return Mono.empty();
                        }));
    }

    // --- Run-command helpers; all delegate to the collection-level implementation ---
    @Override
    public RunCommandResult runPowerShellScript(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runPowerShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public Mono<RunCommandResult> runPowerShellScriptAsync(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runPowerShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public RunCommandResult runShellScript(List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public Mono<RunCommandResult> runShellScriptAsync(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public RunCommandResult runCommand(RunCommandInput inputCommand) {
        return this.manager().virtualMachines().runCommand(this.resourceGroupName(), this.name(), inputCommand);
    }

    @Override
    public Mono<RunCommandResult> runCommandAsync(RunCommandInput inputCommand) {
        return this.manager().virtualMachines().runCommandAsync(this.resourceGroupName(), this.name(), inputCommand);
    }

    // --- Primary-network definition stages ---
    // Each method advances the implicit primary-NIC definition and records the stage reached.
    @Override
    public VirtualMachineImpl withNewPrimaryNetwork(Creatable<Network> creatable) {
        this.nicDefinitionWithPrivateIp =
            this.preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)).withNewPrimaryNetwork(creatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withNewPrimaryNetwork(String addressSpace) {
        this.nicDefinitionWithPrivateIp =
            this
                .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20))
                .withNewPrimaryNetwork(addressSpace);
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryNetwork(Network network) {
        this.nicDefinitionWithSubnet =
            this
                .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20))
                .withExistingPrimaryNetwork(network);
        return this;
    }

    @Override
    public VirtualMachineImpl withSubnet(String name) {
        this.nicDefinitionWithPrivateIp = this.nicDefinitionWithSubnet.withSubnet(name);
        return this;
    }

    @Override
    public VirtualMachineImpl withPrimaryPrivateIPAddressDynamic() {
        this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressDynamic();
        return this;
    }

    @Override
    public VirtualMachineImpl withPrimaryPrivateIPAddressStatic(String staticPrivateIPAddress) {
        this.nicDefinitionWithCreate =
            this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressStatic(staticPrivateIPAddress);
        return this;
    }

    // Finalizes the primary NIC with a caller-supplied public-IP creatable and registers
    // the NIC as a create-time dependency.
    @Override
    public VirtualMachineImpl withNewPrimaryPublicIPAddress(Creatable<PublicIpAddress> creatable) {
        Creatable<NetworkInterface> nicCreatable =
            this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(creatable);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl
withNewPrimaryPublicIPAddress(String leafDnsLabel) {
        return withNewPrimaryPublicIPAddress(leafDnsLabel, null);
    }

    // Creates an implicit public IP with the given DNS label and attaches it to the primary NIC.
    public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel, DeleteOptions deleteOptions) {
        // NOTE(review): the deleteOptions parameter is not referenced anywhere in this body —
        // confirm whether it should be recorded (cf. primaryNetworkInterfaceDeleteOptions).
        PublicIpAddress.DefinitionStages.WithGroup definitionWithGroup =
            this
                .networkManager
                .publicIpAddresses()
                .define(this.namer.getRandomName("pip", 15))
                .withRegion(this.regionName());
        PublicIpAddress.DefinitionStages.WithCreate definitionAfterGroup;
        if (this.creatableGroup != null) {
            definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
        } else {
            definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
        }
        this.implicitPipCreatable = definitionAfterGroup.withLeafDomainLabel(leafDnsLabel);
        Creatable<NetworkInterface> nicCreatable =
            this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(this.implicitPipCreatable);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryPublicIPAddress(PublicIpAddress publicIPAddress) {
        Creatable<NetworkInterface> nicCreatable =
            this.nicDefinitionWithCreate.withExistingPrimaryPublicIPAddress(publicIPAddress);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    // Finalizes the primary NIC definition without any public IP.
    @Override
    public VirtualMachineImpl withoutPrimaryPublicIPAddress() {
        Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate;
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withNewPrimaryNetworkInterface(Creatable<NetworkInterface> creatable) {
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(creatable);
        return this;
    }

    public VirtualMachineImpl withNewPrimaryNetworkInterface(String name, String publicDnsNameLabel) {
        Creatable<NetworkInterface> definitionCreatable =
            prepareNetworkInterface(name).withNewPrimaryPublicIPAddress(publicDnsNameLabel);
        return
            withNewPrimaryNetworkInterface(definitionCreatable);
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryNetworkInterface(NetworkInterface networkInterface) {
        this.existingPrimaryNetworkInterfaceToAssociate = networkInterface;
        return this;
    }

    // --- OS image selection ---
    // Configures the OS disk to be created FROM_IMAGE out of a user VHD (Windows), with the
    // VM agent and automatic updates enabled.
    @Override
    public VirtualMachineImpl withStoredWindowsImage(String imageUrl) {
        VirtualHardDisk userImageVhd = new VirtualHardDisk();
        userImageVhd.withUri(imageUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().osDisk().withImage(userImageVhd);
        this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    @Override
    public VirtualMachineImpl withStoredLinuxImage(String imageUrl) {
        VirtualHardDisk userImageVhd = new VirtualHardDisk();
        userImageVhd.withUri(imageUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().osDisk().withImage(userImageVhd);
        this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        return this;
    }

    @Override
    public VirtualMachineImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) {
        return withSpecificWindowsImageVersion(knownImage.imageReference());
    }

    @Override
    public VirtualMachineImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) {
        return withSpecificLinuxImageVersion(knownImage.imageReference());
    }

    @Override
    public VirtualMachineImpl withSpecificWindowsImageVersion(ImageReference imageReference) {
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
this.innerModel().storageProfile().withImageReference(imageReference);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    @Override
    public VirtualMachineImpl withSpecificLinuxImageVersion(ImageReference imageReference) {
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReference);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        // Marketplace Linux images may need purchase-plan handling at create time.
        this.isMarketplaceLinuxImage = true;
        return this;
    }

    // "Latest" helpers: pin publisher/offer/sku and let the service resolve version "latest".
    @Override
    public VirtualMachineImpl withLatestWindowsImage(String publisher, String offer, String sku) {
        ImageReference imageReference = new ImageReference();
        imageReference.withPublisher(publisher);
        imageReference.withOffer(offer);
        imageReference.withSku(sku);
        imageReference.withVersion("latest");
        return withSpecificWindowsImageVersion(imageReference);
    }

    @Override
    public VirtualMachineImpl withLatestLinuxImage(String publisher, String offer, String sku) {
        ImageReference imageReference = new ImageReference();
        imageReference.withPublisher(publisher);
        imageReference.withOffer(offer);
        imageReference.withSku(sku);
        imageReference.withVersion("latest");
        return withSpecificLinuxImageVersion(imageReference);
    }

    // --- Custom / gallery image variants ---
    // "Specialized" additionally drops the OS profile, since a specialized image carries its own.
    @Override
    public VirtualMachineImpl withGeneralizedWindowsCustomImage(String customImageId) {
        ImageReference imageReferenceInner = new ImageReference();
        imageReferenceInner.withId(customImageId);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReferenceInner);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    @Override
    public VirtualMachineImpl withSpecializedWindowsCustomImage(String customImageId) {
        this.withGeneralizedWindowsCustomImage(customImageId);
        this.removeOsProfile = true;
        return this;
    }

    // Gallery image versions share the custom-image code path (both are resolved by resource id).
    @Override
    public VirtualMachineImpl withGeneralizedWindowsGalleryImageVersion(String galleryImageVersionId) {
        return this.withGeneralizedWindowsCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withSpecializedWindowsGalleryImageVersion(String galleryImageVersionId) {
        return this.withSpecializedWindowsCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withGeneralizedLinuxCustomImage(String customImageId) {
        ImageReference imageReferenceInner = new ImageReference();
        imageReferenceInner.withId(customImageId);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReferenceInner);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        this.isMarketplaceLinuxImage = true;
        return this;
    }

    @Override
    public VirtualMachineImpl withSpecializedLinuxCustomImage(String customImageId) {
        this.withGeneralizedLinuxCustomImage(customImageId);
        this.removeOsProfile = true;
        return this;
    }

    @Override
    public VirtualMachineImpl withGeneralizedLinuxGalleryImageVersion(String galleryImageVersionId) {
        return this.withGeneralizedLinuxCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withSpecializedLinuxGalleryImageVersion(String galleryImageVersionId) {
        return this.withSpecializedLinuxCustomImage(galleryImageVersionId);
    }

    // Attaches an existing specialized OS VHD (unmanaged) as the OS disk.
    @Override
    public VirtualMachineImpl withSpecializedOSUnmanagedDisk(String osDiskUrl, OperatingSystemTypes osType) {
        VirtualHardDisk osVhd = new VirtualHardDisk();
        osVhd.withUri(osDiskUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH);
this.innerModel().storageProfile().osDisk().withVhd(osVhd);
        this.innerModel().storageProfile().osDisk().withOsType(osType);
        // An attached unmanaged VHD excludes managed-disk settings.
        this.innerModel().storageProfile().osDisk().withManagedDisk(null);
        return this;
    }

    // Attaches an existing specialized managed disk as the OS disk.
    @Override
    public VirtualMachineImpl withSpecializedOSDisk(Disk disk, OperatingSystemTypes osType) {
        ManagedDiskParameters diskParametersInner = new ManagedDiskParameters();
        diskParametersInner.withId(disk.id());
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH);
        this.innerModel().storageProfile().osDisk().withManagedDisk(diskParametersInner);
        this.innerModel().storageProfile().osDisk().withOsType(osType);
        this.innerModel().storageProfile().osDisk().withVhd(null);
        return this;
    }

    // --- OS profile: credentials and machine settings ---
    @Override
    public VirtualMachineImpl withRootUsername(String rootUserName) {
        this.innerModel().osProfile().withAdminUsername(rootUserName);
        return this;
    }

    @Override
    public VirtualMachineImpl withAdminUsername(String adminUserName) {
        this.innerModel().osProfile().withAdminUsername(adminUserName);
        return this;
    }

    // Adds an SSH public key to the Linux configuration, creating the config on first use.
    @Override
    public VirtualMachineImpl withSsh(String publicKeyData) {
        OSProfile osProfile = this.innerModel().osProfile();
        if (osProfile.linuxConfiguration().ssh() == null) {
            SshConfiguration sshConfiguration = new SshConfiguration();
            sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>());
            osProfile.linuxConfiguration().withSsh(sshConfiguration);
        }
        SshPublicKey sshPublicKey = new SshPublicKey();
        sshPublicKey.withKeyData(publicKeyData);
        sshPublicKey.withPath("/home/" + osProfile.adminUsername() + "/.ssh/authorized_keys");
        osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey);
        return this;
    }

    @Override
    public VirtualMachineImpl withoutVMAgent() {
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(false);
        return this;
    }

    @Override
    public VirtualMachineImpl withoutAutoUpdate() {
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false);
        return this;
    }

    @Override
    public VirtualMachineImpl withTimeZone(String timeZone) {
        this.innerModel().osProfile().windowsConfiguration().withTimeZone(timeZone);
        return this;
    }

    // Adds a WinRM listener, creating the WinRM configuration on first use.
    // NOTE(review): assumes winRM().listeners() is non-null on a fresh WinRMConfiguration — confirm.
    @Override
    public VirtualMachineImpl withWinRM(WinRMListener listener) {
        if (this.innerModel().osProfile().windowsConfiguration().winRM() == null) {
            WinRMConfiguration winRMConfiguration = new WinRMConfiguration();
            this.innerModel().osProfile().windowsConfiguration().withWinRM(winRMConfiguration);
        }
        this.innerModel().osProfile().windowsConfiguration().winRM().listeners().add(listener);
        return this;
    }

    @Override
    public VirtualMachineImpl withRootPassword(String password) {
        this.innerModel().osProfile().withAdminPassword(password);
        return this;
    }

    @Override
    public VirtualMachineImpl withAdminPassword(String password) {
        this.innerModel().osProfile().withAdminPassword(password);
        return this;
    }

    @Override
    public VirtualMachineImpl withCustomData(String base64EncodedCustomData) {
        this.innerModel().osProfile().withCustomData(base64EncodedCustomData);
        return this;
    }

    // User data lives on the VM model itself, not the OS profile.
    @Override
    public VirtualMachineImpl withUserData(String base64EncodedUserData) {
        this.innerModel().withUserData(base64EncodedUserData);
        return this;
    }

    @Override
    public VirtualMachineImpl withComputerName(String computerName) {
        this.innerModel().osProfile().withComputerName(computerName);
        return this;
    }

    @Override
    public VirtualMachineImpl withSize(String sizeName) {
        this.innerModel().hardwareProfile().withVmSize(VirtualMachineSizeTypes.fromString(sizeName));
        return this;
    }

    @Override
    public VirtualMachineImpl withSize(VirtualMachineSizeTypes size) {
        this.innerModel().hardwareProfile().withVmSize(size);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskCaching(CachingTypes cachingType) {
        this.innerModel().storageProfile().osDisk().withCaching(cachingType);
        return this;
    }

    // Sets the destination VHD for an unmanaged OS disk created from an image; no-ops for
    // managed disks and for OS disks not sourced from an image.
    @Override
    public VirtualMachineImpl withOSDiskVhdLocation(String containerName, String vhdName) {
        if (isManagedDiskEnabled()) {
            return this;
        }
        StorageProfile storageProfile =
this.innerModel().storageProfile();
        OSDisk osDisk = storageProfile.osDisk();
        if (!this.isOSDiskFromImage(osDisk)) {
            return this;
        }
        // Custom (managed) images do not take a user-specified VHD location.
        if (this.isOsDiskFromCustomImage(storageProfile)) {
            return this;
        }
        if (this.isOSDiskFromPlatformImage(storageProfile)) {
            VirtualHardDisk osVhd = new VirtualHardDisk();
            osVhd.withUri(temporaryBlobUrl(containerName, vhdName));
            this.innerModel().storageProfile().osDisk().withVhd(osVhd);
            return this;
        }
        if (this.isOSDiskFromStoredImage(storageProfile)) {
            VirtualHardDisk osVhd = new VirtualHardDisk();
            try {
                // Derive the destination VHD URL from the stored image's storage account.
                URL sourceCustomImageUrl = new URL(osDisk.image().uri());
                URL destinationVhdUrl =
                    new URL(
                        sourceCustomImageUrl.getProtocol(),
                        sourceCustomImageUrl.getHost(),
                        "/" + containerName + "/" + vhdName);
                osVhd.withUri(destinationVhdUrl.toString());
            } catch (MalformedURLException ex) {
                throw logger.logExceptionAsError(new RuntimeException(ex));
            }
            this.innerModel().storageProfile().osDisk().withVhd(osVhd);
        }
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) {
        if (this.innerModel().storageProfile().osDisk().managedDisk() == null) {
            this.innerModel().storageProfile().osDisk().withManagedDisk(new ManagedDiskParameters());
        }
        this.innerModel().storageProfile().osDisk().managedDisk().withStorageAccountType(accountType);
        return this;
    }

    // --- Defaults applied to managed data disks that do not specify their own value ---
    @Override
    public VirtualMachineImpl withDataDiskDefaultCachingType(CachingTypes cachingType) {
        this.managedDataDisks.setDefaultCachingType(cachingType);
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) {
        this.managedDataDisks.setDefaultStorageAccountType(storageAccountType);
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultDeleteOptions(DeleteOptions deleteOptions) {
        this.managedDataDisks.setDefaultDeleteOptions(diskDeleteOptionsFromDeleteOptions(deleteOptions));
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultDiskEncryptionSet(
        String diskEncryptionSetId) {
        this.managedDataDisks.setDefaultEncryptionSet(diskEncryptionSetId);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskEncryptionSettings(DiskEncryptionSettings settings) {
        this.innerModel().storageProfile().osDisk().withEncryptionSettings(settings);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskSizeInGB(int size) {
        this.innerModel().storageProfile().osDisk().withDiskSizeGB(size);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskName(String name) {
        this.innerModel().storageProfile().osDisk().withName(name);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskDeleteOptions(DeleteOptions deleteOptions) {
        this.innerModel().storageProfile().osDisk()
            .withDeleteOption(DiskDeleteOptionTypes.fromString(deleteOptions.toString()));
        return this;
    }

    // Associates the OS disk with a disk-encryption set, creating intermediate holders as needed.
    @Override
    public VirtualMachineImpl withOSDiskDiskEncryptionSet(String diskEncryptionSetId) {
        if (this.innerModel().storageProfile().osDisk().managedDisk() == null) {
            this.innerModel().storageProfile().osDisk()
                .withManagedDisk(new ManagedDiskParameters());
        }
        if (this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) {
            this.innerModel().storageProfile().osDisk().managedDisk()
                .withDiskEncryptionSet(new DiskEncryptionSetParameters());
        }
        this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet().withId(diskEncryptionSetId);
        return this;
    }

    // Ephemeral OS disks live on local storage and require READ_ONLY caching.
    @Override
    public VirtualMachineImpl withEphemeralOSDisk() {
        if (this.innerModel().storageProfile().osDisk().diffDiskSettings() == null) {
            this.innerModel().storageProfile().osDisk().withDiffDiskSettings(new DiffDiskSettings());
        }
        this.innerModel().storageProfile().osDisk().diffDiskSettings().withOption(DiffDiskOptions.LOCAL);
        withOSDiskCaching(CachingTypes.READ_ONLY);
        return this;
    }

    // --- Unmanaged data disks (mutually exclusive with managed disks) ---
    @Override
    public UnmanagedDataDiskImpl defineUnmanagedDataDisk(String name) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return
UnmanagedDataDiskImpl.prepareDataDisk(name, this);
    }

    @Override
    public VirtualMachineImpl withNewUnmanagedDataDisk(Integer sizeInGB) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return defineUnmanagedDataDisk(null).withNewVhd(sizeInGB).attach();
    }

    @Override
    public VirtualMachineImpl withExistingUnmanagedDataDisk(
        String storageAccountName, String containerName, String vhdName) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return defineUnmanagedDataDisk(null).withExistingVhd(storageAccountName, containerName, vhdName).attach();
    }

    // Removes the unmanaged data disk with the given name.
    // NOTE(review): removal uses the same index into both unmanagedDataDisks and the inner
    // dataDisks list — this presumes the two lists stay index-aligned; confirm.
    @Override
    public VirtualMachineImpl withoutUnmanagedDataDisk(String name) {
        int idx = -1;
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            idx++;
            if (dataDisk.name().equalsIgnoreCase(name)) {
                this.unmanagedDataDisks.remove(idx);
                this.innerModel().storageProfile().dataDisks().remove(idx);
                break;
            }
        }
        return this;
    }

    // Removes the unmanaged data disk at the given LUN (same index-alignment presumption).
    @Override
    public VirtualMachineImpl withoutUnmanagedDataDisk(int lun) {
        int idx = -1;
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            idx++;
            if (dataDisk.lun() == lun) {
                this.unmanagedDataDisks.remove(idx);
                this.innerModel().storageProfile().dataDisks().remove(idx);
                break;
            }
        }
        return this;
    }

    // Returns the existing unmanaged data disk by name for in-place update; throws when absent.
    @Override
    public UnmanagedDataDiskImpl updateUnmanagedDataDisk(String name) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_NO_UNMANAGED_DISK_TO_UPDATE);
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            if (dataDisk.name().equalsIgnoreCase(name)) {
                return (UnmanagedDataDiskImpl) dataDisk;
            }
        }
        throw logger.logExceptionAsError(new RuntimeException("A data disk with name '" + name + "' not found"));
    }

    // --- Managed data disks; LUN -1 means "assign automatically at create time" ---
    @Override
    public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this.managedDataDisks.newDisksToAttach.put(this.addDependency(creatable), new DataDisk().withLun(-1));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this
            .managedDataDisks
            .newDisksToAttach
            .put(this.addDependency(creatable), new DataDisk().withLun(lun).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this.managedDataDisks.implicitDisksToAssociate.add(new DataDisk().withLun(-1).withDiskSizeGB(sizeInGB));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(new DataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(
        int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(storageAccountType);
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(sizeInGB)
                    .withCaching(cachingType)
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, VirtualMachineDiskOptions options) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        // Managed-disk parameters are only materialized when the options actually need them.
        ManagedDiskParameters managedDiskParameters = null;
        if
(options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters = new ManagedDiskParameters();
            managedDiskParameters.withStorageAccountType(options.storageAccountType());
            if (options.isDiskEncryptionSetConfigured()) {
                managedDiskParameters.withDiskEncryptionSet(
                    new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
            }
        }
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(sizeInGB)
                    .withCaching(options.cachingTypes())
                    .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    // Attach an already-created managed disk (LUN -1 = auto-assign at create time).
    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(new DataDisk().withLun(-1).withManagedDisk(managedDiskParameters));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(new DataDisk().withLun(lun).withManagedDisk(managedDiskParameters).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk, int newSizeInGB, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(newSizeInGB)
                    .withManagedDisk(managedDiskParameters)
                    .withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(
        Disk disk, int newSizeInGB, int lun, VirtualMachineDiskOptions options) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        if (options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters.withDiskEncryptionSet(
                new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
        }
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(newSizeInGB)
                    .withCaching(options.cachingTypes())
                    .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    // --- Data disks inherited from the source image (identified by image LUN) ---
    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(int imageLun) {
        this.managedDataDisks.newDisksFromImage.add(new DataDisk().withLun(imageLun));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(int imageLun, int newSizeInGB, CachingTypes cachingType) {
        this
            .managedDataDisks
            .newDisksFromImage
            .add(new DataDisk().withLun(imageLun).withDiskSizeGB(newSizeInGB).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(
        int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) {
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(storageAccountType);
        this
            .managedDataDisks
            .newDisksFromImage
            .add(
                new DataDisk()
                    .withLun(imageLun)
                    .withDiskSizeGB(newSizeInGB)
                    .withManagedDisk(managedDiskParameters)
                    .withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(
        int imageLun, int newSizeInGB, VirtualMachineDiskOptions options) {
throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED); ManagedDiskParameters managedDiskParameters = null; if (options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) { managedDiskParameters = new ManagedDiskParameters(); managedDiskParameters.withStorageAccountType(options.storageAccountType()); if (options.isDiskEncryptionSetConfigured()) { managedDiskParameters.withDiskEncryptionSet( new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId())); } } this .managedDataDisks .implicitDisksToAssociate .add( new DataDisk() .withLun(imageLun) .withDiskSizeGB(newSizeInGB) .withCaching(options.cachingTypes()) .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions())) .withManagedDisk(managedDiskParameters)); return this; } @Override public VirtualMachineImpl withoutDataDisk(int lun) { if (!isManagedDiskEnabled()) { return this; } this.managedDataDisks.diskLunsToRemove.add(lun); return this; } @Override public VirtualMachineImpl withNewStorageAccount(Creatable<StorageAccount> creatable) { if (this.creatableStorageAccountKey == null) { this.creatableStorageAccountKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withNewStorageAccount(String name) { StorageAccount.DefinitionStages.WithGroup definitionWithGroup = this.storageManager.storageAccounts().define(name).withRegion(this.regionName()); Creatable<StorageAccount> definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return withNewStorageAccount(definitionAfterGroup); } @Override public VirtualMachineImpl withExistingStorageAccount(StorageAccount storageAccount) { this.existingStorageAccountToAssociate = storageAccount; return this; } @Override public VirtualMachineImpl 
withNewAvailabilitySet(Creatable<AvailabilitySet> creatable) { if (this.creatableAvailabilitySetKey == null) { this.creatableAvailabilitySetKey = this.addDependency(creatable); } return this; } @Override public VirtualMachineImpl withProximityPlacementGroup(String proximityPlacementGroupId) { this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId)); newProximityPlacementGroupName = null; return this; } @Override public VirtualMachineImpl withNewProximityPlacementGroup( String proximityPlacementGroupName, ProximityPlacementGroupType type) { this.newProximityPlacementGroupName = proximityPlacementGroupName; this.newProximityPlacementGroupType = type; this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withoutProximityPlacementGroup() { this.innerModel().withProximityPlacementGroup(null); return this; } @Override public VirtualMachineImpl withNewAvailabilitySet(String name) { AvailabilitySet.DefinitionStages.WithGroup definitionWithGroup = super.myManager.availabilitySets().define(name).withRegion(this.regionName()); AvailabilitySet.DefinitionStages.WithSku definitionWithSku; if (this.creatableGroup != null) { definitionWithSku = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithSku = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } Creatable<AvailabilitySet> creatable; if (isManagedDiskEnabled()) { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.ALIGNED); } else { creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.CLASSIC); } return withNewAvailabilitySet(creatable); } @Override public VirtualMachineImpl withExistingAvailabilitySet(AvailabilitySet availabilitySet) { this.existingAvailabilitySetToAssociate = availabilitySet; return this; } @Override public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable) { 
this.creatableSecondaryNetworkInterfaceKeys.add(this.addDependency(creatable)); return this; } @Override public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable, DeleteOptions deleteOptions) { String key = this.addDependency(creatable); this.creatableSecondaryNetworkInterfaceKeys.add(key); if (deleteOptions != null) { this.secondaryNetworkInterfaceDeleteOptions.put(key, deleteOptions); } return this; } @Override public VirtualMachineImpl withExistingSecondaryNetworkInterface(NetworkInterface networkInterface) { this.existingSecondaryNetworkInterfacesToAssociate.add(networkInterface); return this; } @Override public VirtualMachineExtensionImpl defineNewExtension(String name) { return this.virtualMachineExtensions.define(name); } @Override public VirtualMachineImpl withoutSecondaryNetworkInterface(String name) { if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { int idx = -1; for (NetworkInterfaceReference nicReference : this.innerModel().networkProfile().networkInterfaces()) { idx++; if (!nicReference.primary() && name.equalsIgnoreCase(ResourceUtils.nameFromResourceId(nicReference.id()))) { this.innerModel().networkProfile().networkInterfaces().remove(idx); break; } } } return this; } @Override public VirtualMachineExtensionImpl updateExtension(String name) { return this.virtualMachineExtensions.update(name); } @Override public VirtualMachineImpl withoutExtension(String name) { this.virtualMachineExtensions.remove(name); return this; } @Override public VirtualMachineImpl withPlan(PurchasePlan plan) { this.innerModel().withPlan(new Plan()); this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name()); return this; } @Override public VirtualMachineImpl withPromotionalPlan(PurchasePlan plan, String promotionCode) { this.withPlan(plan); this.innerModel().plan().withPromotionCode(promotionCode); return this; } @Override 
public VirtualMachineImpl withUnmanagedDisks() { this.isUnmanagedDiskSelected = true; return this; } @Override public VirtualMachineImpl withBootDiagnosticsOnManagedStorageAccount() { this.bootDiagnosticsHandler.withBootDiagnostics(true); return this; } @Override public VirtualMachineImpl withBootDiagnostics() { this.bootDiagnosticsHandler.withBootDiagnostics(false); return this; } @Override public VirtualMachineImpl withBootDiagnostics(Creatable<StorageAccount> creatable) { this.bootDiagnosticsHandler.withBootDiagnostics(creatable); return this; } @Override public VirtualMachineImpl withBootDiagnostics(String storageAccountBlobEndpointUri) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri); return this; } @Override public VirtualMachineImpl withBootDiagnostics(StorageAccount storageAccount) { this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount); return this; } @Override public VirtualMachineImpl withoutBootDiagnostics() { this.bootDiagnosticsHandler.withoutBootDiagnostics(); return this; } @Override public VirtualMachineImpl withPriority(VirtualMachinePriorityTypes priority) { this.innerModel().withPriority(priority); return this; } @Override public VirtualMachineImpl withLowPriority() { this.withPriority(VirtualMachinePriorityTypes.LOW); return this; } @Override public VirtualMachineImpl withLowPriority(VirtualMachineEvictionPolicyTypes policy) { this.withLowPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withSpotPriority() { this.withPriority(VirtualMachinePriorityTypes.SPOT); return this; } @Override public VirtualMachineImpl withSpotPriority(VirtualMachineEvictionPolicyTypes policy) { this.withSpotPriority(); this.innerModel().withEvictionPolicy(policy); return this; } @Override public VirtualMachineImpl withMaxPrice(Double maxPrice) { this.innerModel().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice)); return this; } @Override public 
VirtualMachineImpl withSystemAssignedManagedServiceIdentity() { this.virtualMachineMsiHandler.withLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withoutSystemAssignedManagedServiceIdentity() { this.virtualMachineMsiHandler.withoutLocalManagedServiceIdentity(); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) { this.virtualMachineMsiHandler.withAccessTo(resourceId, role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole role) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(role); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessTo(resourceId, roleDefinitionId); return this; } @Override public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(String roleDefinitionId) { this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId); return this; } @Override public VirtualMachineImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) { this.virtualMachineMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity); return this; } @Override public VirtualMachineImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) { this.virtualMachineMsiHandler.withExistingExternalManagedServiceIdentity(identity); return this; } @Override public VirtualMachineImpl withoutUserAssignedManagedServiceIdentity(String identityId) { this.virtualMachineMsiHandler.withoutExternalManagedServiceIdentity(identityId); return this; } @Override public VirtualMachineImpl withLicenseType(String licenseType) { innerModel().withLicenseType(licenseType); return this; } @Override public VirtualMachineImpl enableHibernation() { if (this.innerModel().additionalCapabilities() == 
// --- tail of enableHibernation(): lazily create the AdditionalCapabilities holder, then enable ---
null) { this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities()); }
    this.innerModel().additionalCapabilities().withHibernationEnabled(true);
    return this;
}

/** Disables hibernation support, creating the AdditionalCapabilities holder on demand. */
@Override
public VirtualMachineImpl disableHibernation() {
    if (this.innerModel().additionalCapabilities() == null) {
        this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities());
    }
    this.innerModel().additionalCapabilities().withHibernationEnabled(false);
    return this;
}

/**
 * Decides whether this VM uses managed disks, by checking the OS disk configuration in a
 * fixed precedence order: custom image / attached managed disk force managed; stored (VHD)
 * image / attached unmanaged VHD force unmanaged; a platform image honors the user's
 * explicit {@code withUnmanagedDisks()} choice; otherwise create mode defaults to managed
 * and an existing VM is managed exactly when its OS disk has no VHD.
 */
@Override
public boolean isManagedDiskEnabled() {
    // An OS disk sourced from a custom image is always managed.
    if (isOsDiskFromCustomImage(this.innerModel().storageProfile())) {
        return true;
    }
    // An explicitly attached managed OS disk.
    if (isOSDiskAttachedManaged(this.innerModel().storageProfile().osDisk())) {
        return true;
    }
    // A stored (user) image keeps its unmanaged VHD-based OS disk.
    if (isOSDiskFromStoredImage(this.innerModel().storageProfile())) {
        return false;
    }
    // An explicitly attached unmanaged VHD.
    if (isOSDiskAttachedUnmanaged(this.innerModel().storageProfile().osDisk())) {
        return false;
    }
    // Platform (PIR) image: honor the user's explicit unmanaged-disk selection.
    if (isOSDiskFromPlatformImage(this.innerModel().storageProfile())) {
        if (this.isUnmanagedDiskSelected) {
            return false;
        }
    }
    if (isInCreateMode()) {
        return true;
    } else {
        // Existing VM: absence of a VHD on the OS disk means the disk is managed.
        return this.innerModel().storageProfile().osDisk().vhd() == null;
    }
}

/** Returns the OS computer name, or null when no OS profile is configured. */
@Override
public String computerName() {
    if (innerModel().osProfile() == null) {
        return null;
    }
    return innerModel().osProfile().computerName();
}

@Override
public VirtualMachineSizeTypes size() {
    return innerModel().hardwareProfile().vmSize();
}

/**
 * Resolves the operating system type: the OS disk's explicit type wins, otherwise it is
 * inferred from which OS configuration (Linux vs. Windows) is present on the OS profile;
 * null when neither source is set.
 */
@Override
public OperatingSystemTypes osType() {
    if (innerModel().storageProfile().osDisk().osType() != null) {
        return innerModel().storageProfile().osDisk().osType();
    }
    if (innerModel().osProfile() != null) {
        if (innerModel().osProfile().linuxConfiguration() != null) {
            return OperatingSystemTypes.LINUX;
        }
        if (innerModel().osProfile().windowsConfiguration() != null) {
            return OperatingSystemTypes.WINDOWS;
        }
    }
    return null;
}

/** Returns the OS disk VHD URI, or null when the VM uses managed disks or has no VHD. */
@Override
public String osUnmanagedDiskVhdUri() {
    if (isManagedDiskEnabled() || this.storageProfile().osDisk().vhd() == null) {
        return null;
    }
    return innerModel().storageProfile().osDisk().vhd().uri();
}

// --- next member's signature continues on the following source line ---
@Override
public
CachingTypes osDiskCachingType() { return innerModel().storageProfile().osDisk().caching(); } @Override public int osDiskSize() { return ResourceManagerUtils.toPrimitiveInt(innerModel().storageProfile().osDisk().diskSizeGB()); } @Override public StorageAccountTypes osDiskStorageAccountType() { if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) { return null; } return this.storageProfile().osDisk().managedDisk().storageAccountType(); } @Override public String osDiskId() { if (!isManagedDiskEnabled()) { return null; } return this.storageProfile().osDisk().managedDisk().id(); } @Override public DeleteOptions osDiskDeleteOptions() { if (!isManagedDiskEnabled() || this.storageProfile().osDisk().deleteOption() == null) { return null; } return DeleteOptions.fromString(this.storageProfile().osDisk().deleteOption().toString()); } @Override public String osDiskDiskEncryptionSetId() { if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null || this.storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) { return null; } return this.storageProfile().osDisk().managedDisk().diskEncryptionSet().id(); } @Override public boolean isOSDiskEphemeral() { return this.storageProfile().osDisk().diffDiskSettings() != null && this.storageProfile().osDisk().diffDiskSettings().placement() != null; } @Override public Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks() { Map<Integer, VirtualMachineUnmanagedDataDisk> dataDisks = new HashMap<>(); if (!isManagedDiskEnabled()) { for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { dataDisks.put(dataDisk.lun(), dataDisk); } } return Collections.unmodifiableMap(dataDisks); } @Override public Map<Integer, VirtualMachineDataDisk> dataDisks() { Map<Integer, VirtualMachineDataDisk> dataDisks = new HashMap<>(); if (isManagedDiskEnabled()) { List<DataDisk> innerDataDisks = this.innerModel().storageProfile().dataDisks(); if (innerDataDisks != null) 
// --- tail of dataDisks(): index each inner data disk by LUN ---
{ for (DataDisk innerDataDisk : innerDataDisks) {
        dataDisks.put(innerDataDisk.lun(), new VirtualMachineDataDiskImpl(innerDataDisk));
    }
    }
    }
    return Collections.unmodifiableMap(dataDisks);
}

/** Blocking variant of {@link #getPrimaryNetworkInterfaceAsync()}. */
@Override
public NetworkInterface getPrimaryNetworkInterface() {
    return this.getPrimaryNetworkInterfaceAsync().block();
}

/** Fetches the primary NIC from the network manager by its resolved resource id. */
@Override
public Mono<NetworkInterface> getPrimaryNetworkInterfaceAsync() {
    return this.networkManager.networkInterfaces().getByIdAsync(primaryNetworkInterfaceId());
}

// NOTE(review): both public-IP getters below make a blocking service call via
// getPrimaryNetworkInterface().
@Override
public PublicIpAddress getPrimaryPublicIPAddress() {
    return this.getPrimaryNetworkInterface().primaryIPConfiguration().getPublicIpAddress();
}

@Override
public String getPrimaryPublicIPAddressId() {
    return this.getPrimaryNetworkInterface().primaryIPConfiguration().publicIpAddressId();
}

/** Returns the resource ids of all NICs referenced by this VM's network profile. */
@Override
public List<String> networkInterfaceIds() {
    List<String> nicIds = new ArrayList<>();
    for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) {
        nicIds.add(nicRef.id());
    }
    return nicIds;
}

/**
 * Resolves the primary NIC id: a single NIC is primary by definition; with multiple NICs the
 * one flagged primary wins, falling back to the first reference; null when there are no NICs.
 */
@Override
public String primaryNetworkInterfaceId() {
    final List<NetworkInterfaceReference> nicRefs = this.innerModel().networkProfile().networkInterfaces();
    String primaryNicRefId = null;
    if (nicRefs.size() == 1) {
        primaryNicRefId = nicRefs.get(0).id();
    } else if (nicRefs.size() == 0) {
        // NOTE(review): redundant branch — primaryNicRefId is already null here.
        primaryNicRefId = null;
    } else {
        // Multiple NICs: prefer the one explicitly flagged primary.
        for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) {
            if (nicRef.primary() != null && nicRef.primary()) {
                primaryNicRefId = nicRef.id();
                break;
            }
        }
        // No explicit primary flag: fall back to the first reference.
        if (primaryNicRefId == null) {
            primaryNicRefId = nicRefs.get(0).id();
        }
    }
    return primaryNicRefId;
}

/** Returns the availability set id, or null when the VM is not in an availability set. */
@Override
public String availabilitySetId() {
    if (innerModel().availabilitySet() != null) {
        return innerModel().availabilitySet().id();
    }
    return null;
}

/** Returns the VMSS id, or null when the VM does not belong to a scale set. */
@Override
public String virtualMachineScaleSetId() {
    if (innerModel().virtualMachineScaleSet() != null) {
        return innerModel().virtualMachineScaleSet().id();
    }
    return null;
}

// --- head of provisioningState(): body continues on the following source line ---
@Override
public String provisioningState() {
    return
innerModel().provisioningState(); } @Override public String licenseType() { return innerModel().licenseType(); } @Override public ProximityPlacementGroup proximityPlacementGroup() { if (innerModel().proximityPlacementGroup() == null) { return null; } else { ResourceId id = ResourceId.fromString(innerModel().proximityPlacementGroup().id()); ProximityPlacementGroupInner plgInner = manager() .serviceClient() .getProximityPlacementGroups() .getByResourceGroup(id.resourceGroupName(), id.name()); if (plgInner == null) { return null; } else { return new ProximityPlacementGroupImpl(plgInner); } } } @Override public Mono<List<VirtualMachineExtension>> listExtensionsAsync() { return this.virtualMachineExtensions.listAsync(); } @Override public Map<String, VirtualMachineExtension> listExtensions() { return this.virtualMachineExtensions.asMap(); } @Override public Plan plan() { return innerModel().plan(); } @Override public StorageProfile storageProfile() { return innerModel().storageProfile(); } @Override public OSProfile osProfile() { return innerModel().osProfile(); } @Override public DiagnosticsProfile diagnosticsProfile() { return innerModel().diagnosticsProfile(); } @Override public String vmId() { return innerModel().vmId(); } @Override public VirtualMachineInstanceView instanceView() { if (this.virtualMachineInstanceView == null) { this.refreshInstanceView(); } return this.virtualMachineInstanceView; } @Override public Set<AvailabilityZoneId> availabilityZones() { Set<AvailabilityZoneId> zones = new HashSet<>(); if (this.innerModel().zones() != null) { for (String zone : this.innerModel().zones()) { zones.add(AvailabilityZoneId.fromString(zone)); } } return Collections.unmodifiableSet(zones); } @Override public PowerState powerState() { return PowerState.fromInstanceView(this.instanceView()); } @Override public boolean isBootDiagnosticsEnabled() { return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled(); } @Override public String bootDiagnosticsStorageUri() { 
return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri(); } @Override public boolean isManagedServiceIdentityEnabled() { ResourceIdentityType type = this.managedServiceIdentityType(); return type != null && !type.equals(ResourceIdentityType.NONE); } @Override public String systemAssignedManagedServiceIdentityTenantId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().tenantId(); } return null; } @Override public String systemAssignedManagedServiceIdentityPrincipalId() { if (this.innerModel().identity() != null) { return this.innerModel().identity().principalId(); } return null; } @Override public ResourceIdentityType managedServiceIdentityType() { if (this.innerModel().identity() != null) { return this.innerModel().identity().type(); } return null; } @Override public Set<String> userAssignedManagedServiceIdentityIds() { if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) { return Collections .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet())); } return Collections.unmodifiableSet(new HashSet<String>()); } @Override public BillingProfile billingProfile() { return this.innerModel().billingProfile(); } @Override public boolean isHibernationEnabled() { return this.innerModel().additionalCapabilities() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().additionalCapabilities().hibernationEnabled()); } @Override public SecurityTypes securityType() { SecurityProfile securityProfile = this.innerModel().securityProfile(); if (securityProfile == null) { return null; } return securityProfile.securityType(); } @Override public boolean isSecureBootEnabled() { return securityType() != null && this.innerModel().securityProfile().uefiSettings() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().secureBootEnabled()); } @Override public boolean isVTpmEnabled() { return 
securityType() != null && this.innerModel().securityProfile().uefiSettings() != null && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().vTpmEnabled()); } @Override public OffsetDateTime timeCreated() { return innerModel().timeCreated(); } @Override public DeleteOptions primaryNetworkInterfaceDeleteOptions() { String nicId = primaryNetworkInterfaceId(); return networkInterfaceDeleteOptions(nicId); } @Override public DeleteOptions networkInterfaceDeleteOptions(String networkInterfaceId) { if (CoreUtils.isNullOrEmpty(networkInterfaceId) || this.innerModel().networkProfile() == null || this.innerModel().networkProfile().networkInterfaces() == null) { return null; } return this.innerModel().networkProfile() .networkInterfaces() .stream() .filter(nic -> networkInterfaceId.equalsIgnoreCase(nic.id())) .findAny() .map(NetworkInterfaceReference::deleteOption) .orElse(null); } @Override public VirtualMachinePriorityTypes priority() { return this.innerModel().priority(); } @Override public VirtualMachineEvictionPolicyTypes evictionPolicy() { return this.innerModel().evictionPolicy(); } @Override public String userData() { return this.innerModel().userData(); } @Override public void beforeGroupCreateOrUpdate() { if (creatableStorageAccountKey == null && existingStorageAccountToAssociate == null) { if (osDiskRequiresImplicitStorageAccountCreation() || dataDisksRequiresImplicitStorageAccountCreation()) { Creatable<StorageAccount> storageAccountCreatable = null; if (this.creatableGroup != null) { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withNewResourceGroup(this.creatableGroup); } else { storageAccountCreatable = this .storageManager .storageAccounts() .define(this.namer.getRandomName("stg", 24).replace("-", "")) .withRegion(this.regionName()) .withExistingResourceGroup(this.resourceGroupName()); } 
// --- tail of beforeGroupCreateOrUpdate(): register the implicit storage account dependency,
// then let the boot-diagnostics handler register its own prerequisites ---
this.creatableStorageAccountKey = this.addDependency(storageAccountCreatable); } }
    this.bootDiagnosticsHandler.prepare();
}

/**
 * Creates the VM: finalizes all local defaults and settings, issues the service
 * createOrUpdate call, and re-seats this wrapper around the returned inner model.
 */
@Override
public Mono<VirtualMachine> createResourceAsync() {
    return prepareCreateResourceAsync()
        .flatMap(
            virtualMachine ->
                this
                    .manager()
                    .serviceClient()
                    .getVirtualMachines()
                    .createOrUpdateAsync(resourceGroupName(), vmName, innerModel())
                    .map(
                        virtualMachineInner -> {
                            // Drop cached/pending state and adopt the service's view.
                            reset(virtualMachineInner);
                            return this;
                        }));
}

/**
 * Applies every pre-create default and setting in order: OS disk, OS profile, and hardware
 * defaults; data-disk defaults (managed vs. unmanaged); unmanaged storage, boot diagnostics,
 * and network settings; then, after any new proximity placement group is created, the
 * availability and managed-identity settings.
 */
private Mono<VirtualMachine> prepareCreateResourceAsync() {
    setOSDiskDefaults();
    setOSProfileDefaults();
    setHardwareProfileDefaults();
    if (isManagedDiskEnabled()) {
        managedDataDisks.setDataDisksDefaults();
    } else {
        UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName);
    }
    this.handleUnManagedOSAndDataDisksStorageSettings();
    this.bootDiagnosticsHandler.handleDiagnosticsSettings();
    this.handleNetworkSettings();
    return this
        .createNewProximityPlacementGroupAsync()
        .map(
            virtualMachine -> {
                // These must run after the PPG exists so its id can be referenced.
                this.handleAvailabilitySettings();
                this.virtualMachineMsiHandler.processCreatedExternalIdentities();
                this.virtualMachineMsiHandler.handleExternalIdentities();
                return virtualMachine;
            });
}

/**
 * Updates the VM: re-applies settings, copies the inner model into a patch-style update
 * parameter, and only calls the service when something actually changed.
 */
@Override
public Mono<VirtualMachine> updateResourceAsync() {
    if (isManagedDiskEnabled()) {
        managedDataDisks.setDataDisksDefaults();
    } else {
        UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName);
    }
    this.handleUnManagedOSAndDataDisksStorageSettings();
    this.bootDiagnosticsHandler.handleDiagnosticsSettings();
    this.handleNetworkSettings();
    this.handleAvailabilitySettings();
    this.virtualMachineMsiHandler.processCreatedExternalIdentities();
    VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();
    this.copyInnerToUpdateParameter(updateParameter);
    this.virtualMachineMsiHandler.handleExternalIdentities(updateParameter);
    // Skip the service round-trip when the update parameter carries no changes.
    final boolean vmModified = this.isVirtualMachineModifiedDuringUpdate(updateParameter);
    if (vmModified) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
.updateAsync(resourceGroupName(), vmName, updateParameter) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; }); } else { return Mono.just(this); } } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { this.virtualMachineExtensions.clear(); if (isGroupFaulted) { return Mono.empty(); } else { return this.refreshAsync().then(); } } VirtualMachineImpl withExtension(VirtualMachineExtensionImpl extension) { this.virtualMachineExtensions.addExtension(extension); return this; } private void reset(VirtualMachineInner inner) { this.setInner(inner); clearCachedRelatedResources(); initializeDataDisks(); virtualMachineMsiHandler.clear(); creatableSecondaryNetworkInterfaceKeys.clear(); existingSecondaryNetworkInterfacesToAssociate.clear(); secondaryNetworkInterfaceDeleteOptions.clear(); primaryNetworkInterfaceDeleteOptions = null; } VirtualMachineImpl withUnmanagedDataDisk(UnmanagedDataDiskImpl dataDisk) { this.innerModel().storageProfile().dataDisks().add(dataDisk.innerModel()); this.unmanagedDataDisks.add(dataDisk); return this; } @Override public VirtualMachineImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (isInCreateMode()) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<String>()); } this.innerModel().zones().add(zoneId.toString()); if (this.implicitPipCreatable != null) { this.implicitPipCreatable .withAvailabilityZone(zoneId) .withSku(PublicIPSkuType.STANDARD) .withStaticIP(); } } return this; } @Override public VirtualMachineImpl withOsDiskDeleteOptions(DeleteOptions deleteOptions) { if (deleteOptions == null || this.innerModel().storageProfile() == null || this.innerModel().storageProfile().osDisk() == null) { return null; } this.innerModel().storageProfile().osDisk().withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); return this; } @Override public VirtualMachineImpl withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions deleteOptions) { 
this.primaryNetworkInterfaceDeleteOptions = deleteOptions; return this; } @Override public VirtualMachineImpl withNetworkInterfacesDeleteOptions(DeleteOptions deleteOptions, String... nicIds) { if (nicIds == null || nicIds.length == 0) { throw new IllegalArgumentException("No nicIds specified for `withNetworkInterfacesDeleteOptions`"); } if (this.innerModel().networkProfile() != null && this.innerModel().networkProfile().networkInterfaces() != null) { Set<String> nicIdSet = Arrays.stream(nicIds).map(nicId -> nicId.toLowerCase(Locale.ROOT)).collect(Collectors.toSet()); this.innerModel().networkProfile().networkInterfaces().forEach( nic -> { if (nicIdSet.contains(nic.id().toLowerCase(Locale.ROOT))) { nic.withDeleteOption(deleteOptions); } } ); } return this; } @Override public VirtualMachineImpl withDataDisksDeleteOptions(DeleteOptions deleteOptions, Integer... luns) { if (luns == null || luns.length == 0) { throw new IllegalArgumentException("No luns specified for `withDataDisksDeleteOptions`"); } Set<Integer> lunSet = Arrays.stream(luns).filter(Objects::nonNull).collect(Collectors.toSet()); if (lunSet.isEmpty()) { throw new IllegalArgumentException("No non-null luns specified for `withDataDisksDeleteOptions`"); } if (this.innerModel().storageProfile() != null && this.innerModel().storageProfile().dataDisks() != null) { this.innerModel().storageProfile().dataDisks().forEach( dataDisk -> { if (lunSet.contains(dataDisk.lun())) { dataDisk.withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); } } ); } return this; } AzureEnvironment environment() { return manager().environment(); } private void setOSDiskDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (isOSDiskFromImage(osDisk)) { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() == null) { osDisk.withManagedDisk(new ManagedDiskParameters()); } if (osDisk.managedDisk().storageAccountType() == 
null) { osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS); } osDisk.withVhd(null); } else { if (isOSDiskFromPlatformImage(storageProfile) || isOSDiskFromStoredImage(storageProfile)) { if (osDisk.vhd() == null) { String osDiskVhdContainerName = "vhds"; String osDiskVhdName = this.vmName + "-os-disk-" + UUID.randomUUID().toString() + ".vhd"; withOSDiskVhdLocation(osDiskVhdContainerName, osDiskVhdName); } osDisk.withManagedDisk(null); } if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } else { if (isManagedDiskEnabled()) { if (osDisk.managedDisk() != null) { osDisk.managedDisk().withStorageAccountType(null); } osDisk.withVhd(null); } else { osDisk.withManagedDisk(null); if (osDisk.name() == null) { withOSDiskName(this.vmName + "-os-disk"); } } } if (osDisk.caching() == null) { withOSDiskCaching(CachingTypes.READ_WRITE); } } private void setOSProfileDefaults() { if (isInUpdateMode()) { return; } StorageProfile storageProfile = this.innerModel().storageProfile(); OSDisk osDisk = storageProfile.osDisk(); if (!removeOsProfile && isOSDiskFromImage(osDisk)) { if (osDisk.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) { OSProfile osProfile = this.innerModel().osProfile(); if (osProfile.linuxConfiguration() == null) { osProfile.withLinuxConfiguration(new LinuxConfiguration()); } this .innerModel() .osProfile() .linuxConfiguration() .withDisablePasswordAuthentication(osProfile.adminPassword() == null); } if (this.innerModel().osProfile().computerName() == null) { if (vmName.matches("[0-9]+")) { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } else if (vmName.length() <= 15) { this.innerModel().osProfile().withComputerName(vmName); } else { this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15)); } } } else { this.innerModel().withOsProfile(null); } } private void setHardwareProfileDefaults() { if (!isInCreateMode()) { return; } HardwareProfile 
hardwareProfile = this.innerModel().hardwareProfile(); if (hardwareProfile.vmSize() == null) { hardwareProfile.withVmSize(VirtualMachineSizeTypes.BASIC_A0); } } /** Prepare virtual machine disks profile (StorageProfile). */ private void handleUnManagedOSAndDataDisksStorageSettings() { if (isManagedDiskEnabled()) { return; } StorageAccount storageAccount = null; if (this.creatableStorageAccountKey != null) { storageAccount = this.taskResult(this.creatableStorageAccountKey); } else if (this.existingStorageAccountToAssociate != null) { storageAccount = this.existingStorageAccountToAssociate; } if (isInCreateMode()) { if (storageAccount != null) { if (isOSDiskFromPlatformImage(innerModel().storageProfile())) { String uri = innerModel() .storageProfile() .osDisk() .vhd() .uri() .replaceFirst("\\{storage-base-url}", storageAccount.endPoints().primary().blob()); innerModel().storageProfile().osDisk().vhd().withUri(uri); } UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } } else { if (storageAccount != null) { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName); } else { UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, vmName); } } } private Mono<VirtualMachineImpl> createNewProximityPlacementGroupAsync() { if (isInCreateMode()) { if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) { ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner(); plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType); plgInner.withLocation(this.innerModel().location()); return this .manager() .serviceClient() .getProximityPlacementGroups() .createOrUpdateAsync(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner) .map( createdPlgInner -> { this .innerModel() .withProximityPlacementGroup(new SubResource().withId(createdPlgInner.id())); return this; }); } } return Mono.just(this); } private void 
handleNetworkSettings() { if (isInCreateMode()) { NetworkInterface primaryNetworkInterface = null; if (this.creatablePrimaryNetworkInterfaceKey != null) { primaryNetworkInterface = this.taskResult(this.creatablePrimaryNetworkInterfaceKey); } else if (this.existingPrimaryNetworkInterfaceToAssociate != null) { primaryNetworkInterface = this.existingPrimaryNetworkInterfaceToAssociate; } if (primaryNetworkInterface != null) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(true); nicReference.withId(primaryNetworkInterface.id()); this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } if (this.primaryNetworkInterfaceDeleteOptions != null) { String primaryNetworkInterfaceId = primaryNetworkInterfaceId(); if (primaryNetworkInterfaceId != null) { this.innerModel().networkProfile().networkInterfaces().stream() .filter(nic -> primaryNetworkInterfaceId.equals(nic.id())) .forEach(nic -> nic.withDeleteOption(this.primaryNetworkInterfaceDeleteOptions)); } } for (String creatableSecondaryNetworkInterfaceKey : this.creatableSecondaryNetworkInterfaceKeys) { NetworkInterface secondaryNetworkInterface = this.taskResult(creatableSecondaryNetworkInterfaceKey); NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); if (secondaryNetworkInterfaceDeleteOptions.containsKey(creatableSecondaryNetworkInterfaceKey)) { DeleteOptions deleteOptions = secondaryNetworkInterfaceDeleteOptions.get(creatableSecondaryNetworkInterfaceKey); nicReference.withDeleteOption(deleteOptions); } this.innerModel().networkProfile().networkInterfaces().add(nicReference); } for (NetworkInterface secondaryNetworkInterface : this.existingSecondaryNetworkInterfacesToAssociate) { NetworkInterfaceReference nicReference = new NetworkInterfaceReference(); nicReference.withPrimary(false); nicReference.withId(secondaryNetworkInterface.id()); 
        this.innerModel().networkProfile().networkInterfaces().add(nicReference);
    }
}

// In create mode, wires the resolved availability set (newly created or existing)
// into the VM create payload; no-op in update mode.
private void handleAvailabilitySettings() {
    if (!isInCreateMode()) {
        return;
    }
    AvailabilitySet availabilitySet = null;
    if (this.creatableAvailabilitySetKey != null) {
        availabilitySet = this.taskResult(this.creatableAvailabilitySetKey);
    } else if (this.existingAvailabilitySetToAssociate != null) {
        availabilitySet = this.existingAvailabilitySetToAssociate;
    }
    if (availabilitySet != null) {
        if (this.innerModel().availabilitySet() == null) {
            this.innerModel().withAvailabilitySet(new SubResource());
        }
        this.innerModel().availabilitySet().withId(availabilitySet.id());
    }
}

// True when a storage account must be implicitly created to host the unmanaged
// OS disk VHD (create mode, no explicit account, OS disk from a platform image).
private boolean osDiskRequiresImplicitStorageAccountCreation() {
    if (isManagedDiskEnabled()) {
        return false;
    }
    if (this.creatableStorageAccountKey != null
        || this.existingStorageAccountToAssociate != null
        || !isInCreateMode()) {
        return false;
    }
    return isOSDiskFromPlatformImage(this.innerModel().storageProfile());
}

// True when a storage account must be implicitly created to host unmanaged data
// disk VHDs (some disk lacks an explicit VHD and no account can be reused).
private boolean dataDisksRequiresImplicitStorageAccountCreation() {
    if (isManagedDiskEnabled()) {
        return false;
    }
    if (this.creatableStorageAccountKey != null
        || this.existingStorageAccountToAssociate != null
        || this.unmanagedDataDisks.size() == 0) {
        return false;
    }
    // An EMPTY/FROM_IMAGE disk without an explicit VHD needs somewhere to live.
    boolean hasEmptyVhd = false;
    for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
        if (dataDisk.creationMethod() == DiskCreateOptionTypes.EMPTY
            || dataDisk.creationMethod() == DiskCreateOptionTypes.FROM_IMAGE) {
            if (dataDisk.innerModel().vhd() == null) {
                hasEmptyVhd = true;
                break;
            }
        }
    }
    if (isInCreateMode()) {
        return hasEmptyVhd;
    }
    if (hasEmptyVhd) {
        // In update mode an ATTACH disk with a VHD identifies an existing account to reuse,
        // so no implicit account is needed in that case.
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            if (dataDisk.creationMethod() == DiskCreateOptionTypes.ATTACH && dataDisk.innerModel().vhd() != null) {
                return false;
            }
        }
        return true;
    }
    return false;
}

/**
 * Checks whether the OS disk is directly attached to a unmanaged VHD.
 *
 * @param osDisk the osDisk value in the storage profile
 * @return true if the OS disk is attached to a unmanaged VHD, false otherwise
 */
private boolean isOSDiskAttachedUnmanaged(OSDisk osDisk) {
    return osDisk.createOption() == DiskCreateOptionTypes.ATTACH
        && osDisk.vhd() != null
        && osDisk.vhd().uri() != null;
}

/**
 * Checks whether the OS disk is directly attached to a managed disk.
 *
 * @param osDisk the osDisk value in the storage profile
 * @return true if the OS disk is attached to a managed disk, false otherwise
 */
private boolean isOSDiskAttachedManaged(OSDisk osDisk) {
    return osDisk.createOption() == DiskCreateOptionTypes.ATTACH
        && osDisk.managedDisk() != null
        && osDisk.managedDisk().id() != null;
}

/**
 * Checks whether the OS disk is based on an image (image from PIR or custom image [captured,
 * bringYourOwnFeature]).
 *
 * @param osDisk the osDisk value in the storage profile
 * @return true if the OS disk is configured to use image from PIR or custom image
 */
private boolean isOSDiskFromImage(OSDisk osDisk) {
    return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE;
}

/**
 * Checks whether the OS disk is based on an platform image (image in PIR).
 *
 * @param storageProfile the storage profile
 * @return true if the OS disk is configured to be based on platform image.
 */
private boolean isOSDiskFromPlatformImage(StorageProfile storageProfile) {
    ImageReference imageReference = storageProfile.imageReference();
    // A platform image reference is fully specified by publisher/offer/sku/version.
    return isOSDiskFromImage(storageProfile.osDisk())
        && imageReference != null
        && imageReference.publisher() != null
        && imageReference.offer() != null
        && imageReference.sku() != null
        && imageReference.version() != null;
}

/**
 * Checks whether the OS disk is based on a CustomImage.
 *
 * <p>A custom image is represented by {@link VirtualMachineCustomImage}.
 *
 * @param storageProfile the storage profile
 * @return true if the OS disk is configured to be based on custom image.
 */
private boolean isOsDiskFromCustomImage(StorageProfile storageProfile) {
    ImageReference imageReference = storageProfile.imageReference();
    // A custom image is referenced by resource id rather than publisher/offer/sku/version.
    return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null;
}

/**
 * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature').
 *
 * <p>A stored image is created by capturing a {@link VirtualMachine}.
 *
 * @param storageProfile the storage profile
 * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature')
 */
private boolean isOSDiskFromStoredImage(StorageProfile storageProfile) {
    OSDisk osDisk = storageProfile.osDisk();
    return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null;
}

// Builds a placeholder blob URL; the "{storage-base-url}" prefix is substituted with
// the real storage endpoint once the storage account is known.
private String temporaryBlobUrl(String containerName, String blobName) {
    return "{storage-base-url}" + containerName + "/" + blobName;
}

// Starts a NIC definition in this VM's region and resource group, backed by a new
// virtual network ("vnet" + name) with a dynamic private IP.
private NetworkInterface.DefinitionStages.WithPrimaryPublicIPAddress prepareNetworkInterface(String name) {
    NetworkInterface.DefinitionStages.WithGroup definitionWithGroup =
        this.networkManager.networkInterfaces().define(name).withRegion(this.regionName());
    NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionWithNetwork;
    if (this.creatableGroup != null) {
        definitionWithNetwork = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
    } else {
        definitionWithNetwork = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
    }
    return definitionWithNetwork.withNewPrimaryNetwork("vnet" + name).withPrimaryPrivateIPAddressDynamic();
}

// Resets this object's data-disk bookkeeping from the current inner model state.
private void initializeDataDisks() {
    if (this.innerModel().storageProfile().dataDisks() == null) {
        this.innerModel().storageProfile().withDataDisks(new ArrayList<>());
    }
    this.isUnmanagedDiskSelected = false;
    this.managedDataDisks.clear();
    this.unmanagedDataDisks = new ArrayList<>();
    if (!isManagedDiskEnabled()) {
        // Wrap each inner data disk so it can be manipulated through the fluent API.
        for (DataDisk dataDiskInner : this.storageProfile().dataDisks()) {
            this.unmanagedDataDisks.add(new UnmanagedDataDiskImpl(dataDiskInner, this));
        }
    }
}

// Starts a primary NIC definition scoped to this VM's region and resource group.
private NetworkInterface.DefinitionStages.WithPrimaryNetwork preparePrimaryNetworkInterface(String name) {
    NetworkInterface.DefinitionStages.WithGroup definitionWithGroup =
        this.networkManager.networkInterfaces().define(name).withRegion(this.regionName());
    NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionAfterGroup;
    if (this.creatableGroup != null) {
        definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
    } else {
        definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
    }
    return definitionAfterGroup;
}

// Drops cached views of related resources so they are re-fetched on demand.
private void clearCachedRelatedResources() {
    this.virtualMachineInstanceView = null;
}

// Guards operations that are only valid for unmanaged-disk VMs.
private void throwIfManagedDiskEnabled(String message) {
    if (this.isManagedDiskEnabled()) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(message));
    }
}

// Guards operations that are only valid for managed-disk VMs.
private void throwIfManagedDiskDisabled(String message) {
    if (!this.isManagedDiskEnabled()) {
        throw logger.logExceptionAsError(new UnsupportedOperationException(message));
    }
}

private boolean isInUpdateMode() {
    return !this.isInCreateMode();
}

// Translates the NIC-style DeleteOptions into the disk-specific enum (null-safe).
private DiskDeleteOptionTypes diskDeleteOptionsFromDeleteOptions(DeleteOptions deleteOptions) {
    return deleteOptions == null
        ?
        null
        : DiskDeleteOptionTypes.fromString(deleteOptions.toString());
}

// Compares the current update payload against the snapshot taken when update() began.
// Serialization failures conservatively report "modified" so the update is still sent.
boolean isVirtualMachineModifiedDuringUpdate(VirtualMachineUpdateInner updateParameter) {
    if (updateParameterSnapshotOnUpdate == null || updateParameter == null) {
        return true;
    } else {
        try {
            String jsonStrSnapshot =
                SERIALIZER_ADAPTER.serialize(updateParameterSnapshotOnUpdate, SerializerEncoding.JSON);
            String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON);
            return !jsonStr.equals(jsonStrSnapshot);
        } catch (IOException e) {
            return true;
        }
    }
}

// Deep-copies the inner model into a fresh update payload via a JSON round-trip;
// returns null when serialization fails.
VirtualMachineUpdateInner deepCopyInnerToUpdateParameter() {
    VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();
    copyInnerToUpdateParameter(updateParameter);
    try {
        String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON);
        updateParameter =
            SERIALIZER_ADAPTER.deserialize(jsonStr, VirtualMachineUpdateInner.class, SerializerEncoding.JSON);
    } catch (IOException e) {
        return null;
    }
    if (this.innerModel().identity() != null) {
        // Only the identity type is carried over; other identity fields are service-assigned.
        VirtualMachineIdentity identity = new VirtualMachineIdentity();
        identity.withType(this.innerModel().identity().type());
        updateParameter.withIdentity(identity);
    }
    return updateParameter;
}

// Copies the updatable portions of the inner model onto the update payload.
private void copyInnerToUpdateParameter(VirtualMachineUpdateInner updateParameter) {
    updateParameter.withHardwareProfile(this.innerModel().hardwareProfile());
    updateParameter.withStorageProfile(this.innerModel().storageProfile());
    updateParameter.withOsProfile(this.innerModel().osProfile());
    updateParameter.withNetworkProfile(this.innerModel().networkProfile());
    updateParameter.withDiagnosticsProfile(this.innerModel().diagnosticsProfile());
    updateParameter.withBillingProfile(this.innerModel().billingProfile());
    updateParameter.withSecurityProfile(this.innerModel().securityProfile());
    updateParameter.withAdditionalCapabilities(this.innerModel().additionalCapabilities());
    updateParameter.withAvailabilitySet(this.innerModel().availabilitySet());
    updateParameter.withLicenseType(this.innerModel().licenseType());
    updateParameter.withZones(this.innerModel().zones());
    updateParameter.withTags(this.innerModel().tags());
    updateParameter.withProximityPlacementGroup(this.innerModel().proximityPlacementGroup());
    updateParameter.withPriority(this.innerModel().priority());
    updateParameter.withEvictionPolicy(this.innerModel().evictionPolicy());
    updateParameter.withUserData(this.innerModel().userData());
}

// Supplies the identity principal id and resource id used for role assignments.
RoleAssignmentHelper.IdProvider idProvider() {
    return new RoleAssignmentHelper.IdProvider() {
        @Override
        public String principalId() {
            if (innerModel() != null && innerModel().identity() != null) {
                return innerModel().identity().principalId();
            } else {
                return null;
            }
        }

        @Override
        public String resourceId() {
            if (innerModel() != null) {
                return innerModel().id();
            } else {
                return null;
            }
        }
    };
}

@Override
public VirtualMachineImpl withPlacement(DiffDiskPlacement placement) {
    if (placement != null) {
        this.innerModel().storageProfile().osDisk().diffDiskSettings().withPlacement(placement);
    }
    return this;
}

@Override
public VirtualMachineImpl withExistingVirtualMachineScaleSet(VirtualMachineScaleSet scaleSet) {
    if (scaleSet != null) {
        this.innerModel().withVirtualMachineScaleSet(new SubResource().withId(scaleSet.id()));
    }
    return this;
}

@Override
public VirtualMachineImpl withOSDisk(String diskId) {
    if (diskId == null) {
        return this;
    }
    // Swapping the OS disk only applies to managed-disk VMs that already have a managed OS disk.
    if (!isManagedDiskEnabled() || this.innerModel().storageProfile().osDisk().managedDisk() == null) {
        return this;
    }
    OSDisk osDisk =
        new OSDisk()
            .withCreateOption(this.innerModel().storageProfile().osDisk().createOption());
    osDisk.withManagedDisk(new ManagedDiskParameters().withId(diskId));
    this.storageProfile().withOsDisk(osDisk);
    this.storageProfile().osDisk().managedDisk().withId(diskId);
    return this;
}

@Override
public VirtualMachineImpl withOSDisk(Disk disk) {
    if (disk == null) {
        return this;
    }
    return withOSDisk(disk.id());
}

@Override
public VirtualMachineImpl withTrustedLaunch() {
    ensureSecurityProfile().withSecurityType(SecurityTypes.TRUSTED_LAUNCH);
    return this;
}

@Override
public VirtualMachineImpl withSecureBoot() {
    // UEFI settings only apply after a security type (e.g. Trusted Launch) has been selected.
    if (securityType() == null) {
        return this;
    }
    ensureUefiSettings().withSecureBootEnabled(true);
    return this;
}

@Override
public VirtualMachineImpl withoutSecureBoot() {
    if (securityType() == null) {
        return this;
    }
    ensureUefiSettings().withSecureBootEnabled(false);
    return this;
}

@Override
public VirtualMachineImpl withVTpm() {
    if (securityType() == null) {
        return this;
    }
    ensureUefiSettings().withVTpmEnabled(true);
    return this;
}

@Override
public VirtualMachineImpl withoutVTpm() {
    if (securityType() == null) {
        return this;
    }
    ensureUefiSettings().withVTpmEnabled(false);
    return this;
}

// Lazily creates the security profile on the inner model.
private SecurityProfile ensureSecurityProfile() {
    SecurityProfile securityProfile = this.innerModel().securityProfile();
    if (securityProfile == null) {
        securityProfile = new SecurityProfile();
        this.innerModel().withSecurityProfile(securityProfile);
    }
    return securityProfile;
}

// Lazily creates the UEFI settings on the security profile.
private UefiSettings ensureUefiSettings() {
    UefiSettings uefiSettings = ensureSecurityProfile().uefiSettings();
    if (uefiSettings == null) {
        uefiSettings = new UefiSettings();
        ensureSecurityProfile().withUefiSettings(uefiSettings);
    }
    return uefiSettings;
}

/**
 * Class to manage Data disk collection.
 */
private class ManagedDataDiskCollection {
    // Disks to attach, keyed by the dependency-task key that creates them.
    private final Map<String, DataDisk> newDisksToAttach = new HashMap<>();
    private final List<DataDisk> existingDisksToAttach = new ArrayList<>();
    private final List<DataDisk> implicitDisksToAssociate = new ArrayList<>();
    private final List<Integer> diskLunsToRemove = new ArrayList<>();
    private final List<DataDisk> newDisksFromImage = new ArrayList<>();
    private final VirtualMachineImpl vm;
    // Defaults applied to disks that do not specify their own values.
    private CachingTypes defaultCachingType;
    private StorageAccountTypes defaultStorageAccountType;
    private DiskDeleteOptionTypes defaultDeleteOptions;
    private DiskEncryptionSetParameters defaultDiskEncryptionSet;

    ManagedDataDiskCollection(VirtualMachineImpl vm) {
        this.vm = vm;
    }

    void setDefaultCachingType(CachingTypes cachingType) {
        this.defaultCachingType = cachingType;
    }

    void setDefaultDeleteOptions(DiskDeleteOptionTypes deleteOptions) {
        this.defaultDeleteOptions = deleteOptions;
    }

    void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) {
        this.defaultStorageAccountType = defaultStorageAccountType;
    }

    void setDefaultEncryptionSet(String diskEncryptionSetId) {
        this.defaultDiskEncryptionSet = new DiskEncryptionSetParameters().withId(diskEncryptionSetId);
    }

    // Resolves LUNs and defaults for every pending disk operation, then flushes
    // them into the VM's storage profile and clears the pending state.
    void setDataDisksDefaults() {
        VirtualMachineInner vmInner = this.vm.innerModel();
        if (isPending()) {
            if (vmInner.storageProfile().dataDisks() == null) {
                vmInner.storageProfile().withDataDisks(new ArrayList<>());
            }
            List<DataDisk> dataDisks = vmInner.storageProfile().dataDisks();
            // Collect LUNs already taken so newly assigned ones do not collide.
            final List<Integer> usedLuns = new ArrayList<>();
            for (DataDisk dataDisk : dataDisks) {
                if (dataDisk.lun() != -1) {
                    usedLuns.add(dataDisk.lun());
                }
            }
            for (DataDisk dataDisk : this.newDisksToAttach.values()) {
                if (dataDisk.lun() != -1) {
                    usedLuns.add(dataDisk.lun());
                }
            }
            for (DataDisk dataDisk : this.existingDisksToAttach) {
                if (dataDisk.lun() != -1) {
                    usedLuns.add(dataDisk.lun());
                }
            }
            for (DataDisk dataDisk : this.implicitDisksToAssociate) {
                if (dataDisk.lun() != -1) {
                    usedLuns.add(dataDisk.lun());
                }
            }
            for (DataDisk dataDisk : this.newDisksFromImage) {
                if (dataDisk.lun() != -1) {
                    usedLuns.add(dataDisk.lun());
                }
            }
            // Hands out the lowest free LUN and marks it used.
            Callable<Integer> nextLun =
                () -> {
                    Integer lun = 0;
                    while (usedLuns.contains(lun)) {
                        lun++;
                    }
                    usedLuns.add(lun);
                    return lun;
                };
            try {
                setAttachableNewDataDisks(nextLun);
                setAttachableExistingDataDisks(nextLun);
                setImplicitDataDisks(nextLun);
            } catch (Exception ex) {
                throw logger.logExceptionAsError(Exceptions.propagate(ex));
            }
            setImageBasedDataDisks();
            removeDataDisks();
        }
        if (vmInner.storageProfile().dataDisks() != null && vmInner.storageProfile().dataDisks().size() == 0) {
            if (vm.isInCreateMode()) {
                // In create mode an empty list is normalized to null in the payload.
                vmInner.storageProfile().withDataDisks(null);
            }
        }
        this.clear();
    }

    // Resets all pending operations and defaults.
    private void clear() {
        newDisksToAttach.clear();
        existingDisksToAttach.clear();
        implicitDisksToAssociate.clear();
        diskLunsToRemove.clear();
        newDisksFromImage.clear();
        defaultCachingType = null;
        defaultStorageAccountType = null;
        defaultDeleteOptions = null;
        defaultDiskEncryptionSet = null;
    }

    // True when any disk operation is waiting to be applied.
    private boolean isPending() {
        return newDisksToAttach.size() > 0
            || existingDisksToAttach.size() > 0
            || implicitDisksToAssociate.size() > 0
            || diskLunsToRemove.size() > 0
            || newDisksFromImage.size() > 0;
    }

    // Applies the default disk encryption set unless the disk already carries one.
    private void setDefaultDiskEncryptionSetOptions(DataDisk dataDisk) {
        if (getDefaultDiskEncryptionSetOptions() != null) {
            if (dataDisk.managedDisk() != null && dataDisk.managedDisk().diskEncryptionSet() != null) {
                if (dataDisk.managedDisk().diskEncryptionSet().id() == null) {
                    dataDisk.managedDisk().withDiskEncryptionSet(null);
                }
            } else {
                if (dataDisk.managedDisk() == null) {
                    dataDisk.withManagedDisk(new ManagedDiskParameters());
                }
                dataDisk.managedDisk().withDiskEncryptionSet(getDefaultDiskEncryptionSetOptions());
            }
        }
    }

    // Attaches managed disks that were created as dependency tasks.
    private void setAttachableNewDataDisks(Callable<Integer> nextLun) throws Exception {
        List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks();
        for (Map.Entry<String, DataDisk> entry : this.newDisksToAttach.entrySet()) {
            Disk managedDisk =
                vm.taskResult(entry.getKey());
            DataDisk dataDisk = entry.getValue();
            dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH);
            if (dataDisk.lun() == -1) {
                dataDisk.withLun(nextLun.call());
            }
            dataDisk.withManagedDisk(new ManagedDiskParameters());
            dataDisk.managedDisk().withId(managedDisk.id());
            if (dataDisk.caching() == null) {
                dataDisk.withCaching(getDefaultCachingType());
            }
            if (dataDisk.deleteOption() == null) {
                dataDisk.withDeleteOption(getDefaultDeleteOptions());
            }
            setDefaultDiskEncryptionSetOptions(dataDisk);
            // Name is cleared; the attached disk resource already carries its own name.
            dataDisk.withName(null);
            dataDisks.add(dataDisk);
        }
    }

    // Attaches pre-existing managed disks.
    private void setAttachableExistingDataDisks(Callable<Integer> nextLun) throws Exception {
        List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks();
        for (DataDisk dataDisk : this.existingDisksToAttach) {
            dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH);
            if (dataDisk.lun() == -1) {
                dataDisk.withLun(nextLun.call());
            }
            if (dataDisk.caching() == null) {
                dataDisk.withCaching(getDefaultCachingType());
            }
            if (dataDisk.deleteOption() == null) {
                dataDisk.withDeleteOption(getDefaultDeleteOptions());
            }
            setDefaultDiskEncryptionSetOptions(dataDisk);
            dataDisk.withName(null);
            dataDisks.add(dataDisk);
        }
    }

    // Configures empty managed disks to be created implicitly alongside the VM.
    private void setImplicitDataDisks(Callable<Integer> nextLun) throws Exception {
        List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks();
        for (DataDisk dataDisk : this.implicitDisksToAssociate) {
            dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY);
            if (dataDisk.lun() == -1) {
                dataDisk.withLun(nextLun.call());
            }
            if (dataDisk.caching() == null) {
                dataDisk.withCaching(getDefaultCachingType());
            }
            if (dataDisk.managedDisk() == null) {
                dataDisk.withManagedDisk(new ManagedDiskParameters());
            }
            if (dataDisk.managedDisk().storageAccountType() == null) {
                dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType());
            }
            if (dataDisk.deleteOption() == null) {
                dataDisk.withDeleteOption(getDefaultDeleteOptions());
            }
            setDefaultDiskEncryptionSetOptions(dataDisk);
            dataDisk.withName(null);
            dataDisks.add(dataDisk);
        }
    }

    // Adds data disks that the VM image itself defines (no LUN assignment needed here).
    private void setImageBasedDataDisks() {
        List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks();
        for (DataDisk dataDisk : this.newDisksFromImage) {
            dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
            if (dataDisk.caching() == null) {
                dataDisk.withCaching(getDefaultCachingType());
            }
            if (dataDisk.deleteOption() == null) {
                dataDisk.withDeleteOption(getDefaultDeleteOptions());
            }
            setDefaultDiskEncryptionSetOptions(dataDisk);
            dataDisk.withName(null);
            dataDisks.add(dataDisk);
        }
    }

    // Removes the disks whose LUNs were queued for detachment.
    private void removeDataDisks() {
        List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks();
        for (Integer lun : this.diskLunsToRemove) {
            int indexToRemove = 0;
            for (DataDisk dataDisk : dataDisks) {
                if (dataDisk.lun() == lun) {
                    dataDisks.remove(indexToRemove);
                    break;
                }
                indexToRemove++;
            }
        }
    }

    // Defaults to READ_WRITE when no caching type was configured.
    private CachingTypes getDefaultCachingType() {
        if (defaultCachingType == null) {
            return CachingTypes.READ_WRITE;
        }
        return defaultCachingType;
    }

    // Defaults to Standard LRS when no storage account type was configured.
    private StorageAccountTypes getDefaultStorageAccountType() {
        if (defaultStorageAccountType == null) {
            return StorageAccountTypes.STANDARD_LRS;
        }
        return defaultStorageAccountType;
    }

    private DiskDeleteOptionTypes getDefaultDeleteOptions() {
        return defaultDeleteOptions;
    }

    private DiskEncryptionSetParameters getDefaultDiskEncryptionSetOptions() {
        return defaultDiskEncryptionSet;
    }
}

/**
 * Class to manage VM boot diagnostics settings.
 */
private class BootDiagnosticsHandler {
    private final VirtualMachineImpl vmImpl;
    private String creatableDiagnosticsStorageAccountKey;
    // True when boot diagnostics should use the platform-managed storage (no explicit URI).
    private boolean useManagedStorageAccount = false;

    BootDiagnosticsHandler(VirtualMachineImpl vmImpl) {
        this.vmImpl = vmImpl;
        // Diagnostics enabled without a storage URI implies managed storage.
        if (isBootDiagnosticsEnabled()
            && this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri() == null) {
            this.useManagedStorageAccount = true;
        }
    }

    public boolean isBootDiagnosticsEnabled() {
        if (this.vmInner().diagnosticsProfile() != null
            && this.vmInner().diagnosticsProfile().bootDiagnostics() != null
            && this.vmInner().diagnosticsProfile().bootDiagnostics().enabled() != null) {
            return this.vmInner().diagnosticsProfile().bootDiagnostics().enabled();
        }
        return false;
    }

    public String bootDiagnosticsStorageUri() {
        if (this.vmInner().diagnosticsProfile() != null
            && this.vmInner().diagnosticsProfile().bootDiagnostics() != null) {
            return this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri();
        }
        return null;
    }

    BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) {
        this.enableDisable(true);
        this.useManagedStorageAccount = useManagedStorageAccount;
        return this;
    }

    BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) {
        this.enableDisable(true);
        this.useManagedStorageAccount = false;
        this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(creatable);
        return this;
    }

    BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) {
        this.enableDisable(true);
        this.useManagedStorageAccount = false;
        this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(storageAccountBlobEndpointUri);
        return this;
    }

    BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) {
        return this.withBootDiagnostics(storageAccount.endPoints().primary().blob());
    }

    BootDiagnosticsHandler withoutBootDiagnostics() {
        this.enableDisable(false);
        this.useManagedStorageAccount = false;
        return this;
    }

    // Ensures a storage account creatable exists when boot diagnostics needs one.
    void prepare() {
        if
        (useManagedStorageAccount) {
            return;
        }
        DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile();
        if (diagnosticsProfile == null
            || diagnosticsProfile.bootDiagnostics() == null
            || diagnosticsProfile.bootDiagnostics().storageUri() != null) {
            return;
        }
        boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled());
        if (!enableBD) {
            return;
        }
        // Reuse any storage account the VM is already creating or associating.
        if (this.creatableDiagnosticsStorageAccountKey != null
            || this.vmImpl.creatableStorageAccountKey != null
            || this.vmImpl.existingStorageAccountToAssociate != null) {
            return;
        }
        String accountName = this.vmImpl.namer.getRandomName("stg", 24).replace("-", "");
        Creatable<StorageAccount> storageAccountCreatable;
        if (this.vmImpl.creatableGroup != null) {
            storageAccountCreatable =
                this
                    .vmImpl
                    .storageManager
                    .storageAccounts()
                    .define(accountName)
                    .withRegion(this.vmImpl.regionName())
                    .withNewResourceGroup(this.vmImpl.creatableGroup);
        } else {
            storageAccountCreatable =
                this
                    .vmImpl
                    .storageManager
                    .storageAccounts()
                    .define(accountName)
                    .withRegion(this.vmImpl.regionName())
                    .withExistingResourceGroup(this.vmImpl.resourceGroupName());
        }
        this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(storageAccountCreatable);
    }

    // After dependency tasks resolve, writes the storage blob endpoint into the diagnostics profile.
    void handleDiagnosticsSettings() {
        if (useManagedStorageAccount) {
            return;
        }
        DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile();
        if (diagnosticsProfile == null
            || diagnosticsProfile.bootDiagnostics() == null
            || diagnosticsProfile.bootDiagnostics().storageUri() != null) {
            return;
        }
        boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled());
        if (!enableBD) {
            return;
        }
        StorageAccount storageAccount = null;
        if (creatableDiagnosticsStorageAccountKey != null) {
            storageAccount = this.vmImpl.<StorageAccount>taskResult(creatableDiagnosticsStorageAccountKey);
        } else if (this.vmImpl.creatableStorageAccountKey != null) {
            storageAccount =
                this.vmImpl.<StorageAccount>taskResult(this.vmImpl.creatableStorageAccountKey);
        } else if (this.vmImpl.existingStorageAccountToAssociate != null) {
            storageAccount = this.vmImpl.existingStorageAccountToAssociate;
        }
        if (storageAccount == null) {
            throw logger
                .logExceptionAsError(
                    new IllegalStateException(
                        "Unable to retrieve expected storageAccount instance for BootDiagnostics"));
        }
        vmInner()
            .diagnosticsProfile()
            .bootDiagnostics()
            .withStorageUri(storageAccount.endPoints().primary().blob());
    }

    // Always reads through the parent so a swapped inner model (e.g. after refresh) is seen.
    private VirtualMachineInner vmInner() {
        return this.vmImpl.innerModel();
    }

    // Toggles boot diagnostics; disabling also clears any custom storage URI.
    private void enableDisable(boolean enable) {
        if (this.vmInner().diagnosticsProfile() == null) {
            this.vmInner().withDiagnosticsProfile(new DiagnosticsProfile());
        }
        if (this.vmInner().diagnosticsProfile().bootDiagnostics() == null) {
            this.vmInner().diagnosticsProfile().withBootDiagnostics(new BootDiagnostics());
        }
        if (enable) {
            this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(true);
        } else {
            this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(false);
            this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(null);
        }
    }
}
class VirtualMachineImpl
    extends GroupableResourceImpl<VirtualMachine, VirtualMachineInner, VirtualMachineImpl, ComputeManager>
    implements VirtualMachine,
        VirtualMachine.DefinitionManagedOrUnmanaged,
        VirtualMachine.DefinitionManaged,
        VirtualMachine.DefinitionUnmanaged,
        VirtualMachine.Update,
        VirtualMachine.DefinitionStages.WithSystemAssignedIdentityBasedAccessOrCreate,
        VirtualMachine.UpdateStages.WithSystemAssignedIdentityBasedAccessOrUpdate {

    private final ClientLogger logger = new ClientLogger(VirtualMachineImpl.class);
    private final StorageManager storageManager;
    private final NetworkManager networkManager;
    private final AuthorizationManager authorizationManager;
    private final String vmName;
    // Generates derived resource names (NICs, storage accounts, ...) from the VM name.
    private final IdentifierProvider namer;
    // Dependency-task keys / existing resources to associate; resolved during create.
    private String creatableStorageAccountKey;
    private String creatableAvailabilitySetKey;
    private String creatablePrimaryNetworkInterfaceKey;
    private List<String> creatableSecondaryNetworkInterfaceKeys;
    private StorageAccount existingStorageAccountToAssociate;
    private AvailabilitySet existingAvailabilitySetToAssociate;
    private NetworkInterface existingPrimaryNetworkInterfaceToAssociate;
    private List<NetworkInterface> existingSecondaryNetworkInterfacesToAssociate;
    // Cached instance view; cleared by clearCachedRelatedResources().
    private VirtualMachineInstanceView virtualMachineInstanceView;
    private boolean isMarketplaceLinuxImage;
    // Intermediate NIC definition stages used while fluently building the primary NIC.
    private NetworkInterface.DefinitionStages.WithPrimaryPrivateIP nicDefinitionWithPrivateIp;
    private NetworkInterface.DefinitionStages.WithPrimaryNetworkSubnet nicDefinitionWithSubnet;
    private NetworkInterface.DefinitionStages.WithCreate nicDefinitionWithCreate;
    private VirtualMachineExtensionsImpl virtualMachineExtensions;
    private boolean isUnmanagedDiskSelected;
    private List<VirtualMachineUnmanagedDataDisk> unmanagedDataDisks;
    private final ManagedDataDiskCollection managedDataDisks;
    private final BootDiagnosticsHandler bootDiagnosticsHandler;
    private VirtualMachineMsiHandler virtualMachineMsiHandler;
    private PublicIpAddress.DefinitionStages.WithCreate implicitPipCreatable;
private String newProximityPlacementGroupName; private ProximityPlacementGroupType newProximityPlacementGroupType; private boolean removeOsProfile; private DeleteOptions primaryNetworkInterfaceDeleteOptions; private final Map<String, DeleteOptions> secondaryNetworkInterfaceDeleteOptions = new HashMap<>(); private VirtualMachineUpdateInner updateParameterSnapshotOnUpdate; private static final SerializerAdapter SERIALIZER_ADAPTER = SerializerFactory.createDefaultManagementSerializerAdapter(); VirtualMachineImpl( String name, VirtualMachineInner innerModel, final ComputeManager computeManager, final StorageManager storageManager, final NetworkManager networkManager, final AuthorizationManager authorizationManager) { super(name, innerModel, computeManager); this.storageManager = storageManager; this.networkManager = networkManager; this.authorizationManager = authorizationManager; this.vmName = name; this.isMarketplaceLinuxImage = false; this.namer = this.manager().resourceManager().internalContext().createIdentifierProvider(this.vmName); this.creatableSecondaryNetworkInterfaceKeys = new ArrayList<>(); this.existingSecondaryNetworkInterfacesToAssociate = new ArrayList<>(); this.virtualMachineExtensions = new VirtualMachineExtensionsImpl(computeManager.serviceClient().getVirtualMachineExtensions(), this); this.managedDataDisks = new ManagedDataDiskCollection(this); initializeDataDisks(); this.bootDiagnosticsHandler = new BootDiagnosticsHandler(this); this.virtualMachineMsiHandler = new VirtualMachineMsiHandler(authorizationManager, this); this.newProximityPlacementGroupName = null; this.newProximityPlacementGroupType = null; } @Override public VirtualMachineImpl update() { updateParameterSnapshotOnUpdate = this.deepCopyInnerToUpdateParameter(); return super.update(); } @Override public Mono<VirtualMachine> refreshAsync() { return super .refreshAsync() .map( virtualMachine -> { reset(virtualMachine.innerModel()); virtualMachineExtensions.refresh(); return virtualMachine; 
                // (closes the map lambda of refreshAsync(), begun on the previous chunk line)
                });
    }

    // Fetches the VM's inner model from the service.
    @Override
    protected Mono<VirtualMachineInner> getInnerAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .getByResourceGroupAsync(this.resourceGroupName(), this.name());
    }

    // --- Lifecycle operations. Each sync variant simply blocks on its async twin. ---

    @Override
    public void deallocate() {
        this.deallocateAsync().block();
    }

    // Deallocates (stops and releases compute resources), then refreshes local state.
    @Override
    public Mono<Void> deallocateAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .deallocateAsync(this.resourceGroupName(), this.name())
            // NOTE(review): map(aVoid -> refreshAsync()) creates the refresh Mono but then()
            // discards it without subscribing — the refresh likely never runs; flatMap would.
            .map(aVoid -> this.refreshAsync())
            .then();
    }

    @Override
    public void deallocate(boolean hibernate) {
        this.deallocateAsync(hibernate).block();
    }

    // Deallocate with optional hibernation.
    @Override
    public Mono<Void> deallocateAsync(boolean hibernate) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .deallocateAsync(this.resourceGroupName(), this.name(), hibernate)
            // NOTE(review): same map-then-discard pattern as deallocateAsync() above.
            .map(aVoid -> this.refreshAsync())
            .then();
    }

    @Override
    public void generalize() {
        this.generalizeAsync().block();
    }

    @Override
    public Mono<Void> generalizeAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .generalizeAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void powerOff() {
        this.powerOffAsync().block();
    }

    // Power off; the trailing null means "no explicit skipShutdown flag".
    @Override
    public Mono<Void> powerOffAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .powerOffAsync(this.resourceGroupName(), this.name(), null);
    }

    @Override
    public void powerOff(boolean skipShutdown) {
        this.powerOffAsync(skipShutdown).block();
    }

    @Override
    public Mono<Void> powerOffAsync(boolean skipShutdown) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .powerOffAsync(this.resourceGroupName(), this.name(), skipShutdown);
    }

    @Override
    public void restart() {
        this.restartAsync().block();
    }

    @Override
    public Mono<Void> restartAsync() {
        return this.manager().serviceClient().getVirtualMachines().restartAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void start() {
        this.startAsync().block();
    }

    @Override
    public Mono<Void> startAsync() {
        return
            this.manager().serviceClient().getVirtualMachines().startAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void redeploy() {
        this.redeployAsync().block();
    }

    @Override
    public Mono<Void> redeployAsync() {
        return this.manager().serviceClient().getVirtualMachines().redeployAsync(this.resourceGroupName(), this.name());
    }

    @Override
    public void simulateEviction() {
        this.simulateEvictionAsync().block();
    }

    @Override
    public Mono<Void> simulateEvictionAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .simulateEvictionAsync(this.resourceGroupName(), this.name());
    }

    // Converts unmanaged (VHD) disks to managed disks, then refreshes local state.
    @Override
    public void convertToManaged() {
        this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .convertToManagedDisks(this.resourceGroupName(), this.name());
        this.refresh();
    }

    @Override
    public Mono<Void> convertToManagedAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .convertToManagedDisksAsync(this.resourceGroupName(), this.name())
            // flatMap (unlike the deallocate paths above) does chain the refresh before completing.
            .flatMap(aVoid -> refreshAsync())
            .then();
    }

    @Override
    public VirtualMachineEncryption diskEncryption() {
        return new VirtualMachineEncryptionImpl(this);
    }

    // Lists the VM sizes available for this VM, wrapping each inner model.
    @Override
    public PagedIterable<VirtualMachineSize> availableSizes() {
        return PagedConverter.mapPage(this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .listAvailableSizes(this.resourceGroupName(), this.name()), VirtualMachineSizeImpl::new);
    }

    @Override
    public String capture(String containerName, String vhdPrefix, boolean overwriteVhd) {
        return this.captureAsync(containerName, vhdPrefix, overwriteVhd).block();
    }

    // Captures the VM image into the given blob container; resolves to the capture
    // result serialized as a JSON string.
    @Override
    public Mono<String> captureAsync(String containerName, String vhdPrefix, boolean overwriteVhd) {
        VirtualMachineCaptureParameters parameters = new VirtualMachineCaptureParameters();
        parameters.withDestinationContainerName(containerName);
        parameters.withOverwriteVhds(overwriteVhd);
        parameters.withVhdPrefix(vhdPrefix);
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .captureAsync(this.resourceGroupName(), this.name(), parameters)
            .map(
    // (lambda continues on the next chunk line)
                // (continuation of captureAsync's map lambda from the previous chunk line)
                // Serialize the capture result to JSON; serialization failures are logged and rethrown.
                captureResultInner -> {
                    try {
                        return SerializerUtils.getObjectMapper().writeValueAsString(captureResultInner);
                    } catch (JsonProcessingException ex) {
                        throw logger.logExceptionAsError(Exceptions.propagate(ex));
                    }
                });
    }

    @Override
    public VirtualMachineInstanceView refreshInstanceView() {
        return refreshInstanceViewAsync().block();
    }

    // Re-fetches the VM with its instance view expanded and caches the result in
    // virtualMachineInstanceView (cleared to null when the service returns empty).
    @Override
    public Mono<VirtualMachineInstanceView> refreshInstanceViewAsync() {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
            .getByResourceGroupWithResponseAsync(
                this.resourceGroupName(), this.name(), InstanceViewTypes.INSTANCE_VIEW)
            .map(
                inner -> {
                    virtualMachineInstanceView =
                        new VirtualMachineInstanceViewImpl(inner.getValue().instanceView());
                    return virtualMachineInstanceView;
                })
            .switchIfEmpty(
                Mono
                    .defer(
                        () -> {
                            virtualMachineInstanceView = null;
                            return Mono.empty();
                        }));
    }

    // --- Run-command helpers: thin delegations to the VirtualMachines collection API. ---

    @Override
    public RunCommandResult runPowerShellScript(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runPowerShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public Mono<RunCommandResult> runPowerShellScriptAsync(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runPowerShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public RunCommandResult runShellScript(List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runShellScript(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public Mono<RunCommandResult> runShellScriptAsync(
        List<String> scriptLines, List<RunCommandInputParameter> scriptParameters) {
        return this
            .manager()
            .virtualMachines()
            .runShellScriptAsync(this.resourceGroupName(), this.name(), scriptLines, scriptParameters);
    }

    @Override
    public RunCommandResult runCommand(RunCommandInput inputCommand) {
        return this.manager().virtualMachines().runCommand(this.resourceGroupName(), this.name(), inputCommand);
    }

    @Override
    public Mono<RunCommandResult> runCommandAsync(RunCommandInput inputCommand) {
        return this.manager().virtualMachines().runCommandAsync(this.resourceGroupName(), this.name(), inputCommand);
    }

    // --- Fluent setters for the primary network interface definition. Each stores the
    // next stage of the implicit NIC definition; the NIC name is randomly generated. ---

    @Override
    public VirtualMachineImpl withNewPrimaryNetwork(Creatable<Network> creatable) {
        this.nicDefinitionWithPrivateIp =
            this.preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20)).withNewPrimaryNetwork(creatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withNewPrimaryNetwork(String addressSpace) {
        this.nicDefinitionWithPrivateIp =
            this
                .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20))
                .withNewPrimaryNetwork(addressSpace);
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryNetwork(Network network) {
        this.nicDefinitionWithSubnet =
            this
                .preparePrimaryNetworkInterface(this.namer.getRandomName("nic", 20))
                .withExistingPrimaryNetwork(network);
        return this;
    }

    @Override
    public VirtualMachineImpl withSubnet(String name) {
        this.nicDefinitionWithPrivateIp = this.nicDefinitionWithSubnet.withSubnet(name);
        return this;
    }

    @Override
    public VirtualMachineImpl withPrimaryPrivateIPAddressDynamic() {
        this.nicDefinitionWithCreate = this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressDynamic();
        return this;
    }

    @Override
    public VirtualMachineImpl withPrimaryPrivateIPAddressStatic(String staticPrivateIPAddress) {
        this.nicDefinitionWithCreate =
            this.nicDefinitionWithPrivateIp.withPrimaryPrivateIPAddressStatic(staticPrivateIPAddress);
        return this;
    }

    // Registers the NIC (with its new public IP) as a creatable dependency of this VM.
    @Override
    public VirtualMachineImpl withNewPrimaryPublicIPAddress(Creatable<PublicIpAddress> creatable) {
        Creatable<NetworkInterface> nicCreatable =
            this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(creatable);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl
    // (method signature continues on the next chunk line)
        // (continues the signature begun on the previous chunk line)
        withNewPrimaryPublicIPAddress(String leafDnsLabel) {
        return withNewPrimaryPublicIPAddress(leafDnsLabel, null);
    }

    // Implicitly defines a public IP (random "pip" name, given DNS leaf label) in this
    // VM's (possibly still-creatable) resource group and attaches it to the primary NIC.
    // NOTE(review): the deleteOptions parameter is never used in this body — TODO confirm
    // whether it should be recorded (e.g. in primaryNetworkInterfaceDeleteOptions).
    public VirtualMachineImpl withNewPrimaryPublicIPAddress(String leafDnsLabel, DeleteOptions deleteOptions) {
        PublicIpAddress.DefinitionStages.WithGroup definitionWithGroup =
            this
                .networkManager
                .publicIpAddresses()
                .define(this.namer.getRandomName("pip", 15))
                .withRegion(this.regionName());
        PublicIpAddress.DefinitionStages.WithCreate definitionAfterGroup;
        if (this.creatableGroup != null) {
            definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
        } else {
            definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
        }
        this.implicitPipCreatable = definitionAfterGroup.withLeafDomainLabel(leafDnsLabel);
        Creatable<NetworkInterface> nicCreatable =
            this.nicDefinitionWithCreate.withNewPrimaryPublicIPAddress(this.implicitPipCreatable);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryPublicIPAddress(PublicIpAddress publicIPAddress) {
        Creatable<NetworkInterface> nicCreatable =
            this.nicDefinitionWithCreate.withExistingPrimaryPublicIPAddress(publicIPAddress);
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    // No public IP requested: register the NIC definition as-is.
    @Override
    public VirtualMachineImpl withoutPrimaryPublicIPAddress() {
        Creatable<NetworkInterface> nicCreatable = this.nicDefinitionWithCreate;
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(nicCreatable);
        return this;
    }

    @Override
    public VirtualMachineImpl withNewPrimaryNetworkInterface(Creatable<NetworkInterface> creatable) {
        this.creatablePrimaryNetworkInterfaceKey = this.addDependency(creatable);
        return this;
    }

    public VirtualMachineImpl withNewPrimaryNetworkInterface(String name, String publicDnsNameLabel) {
        Creatable<NetworkInterface> definitionCreatable =
            prepareNetworkInterface(name).withNewPrimaryPublicIPAddress(publicDnsNameLabel);
        return
            withNewPrimaryNetworkInterface(definitionCreatable);
    }

    @Override
    public VirtualMachineImpl withExistingPrimaryNetworkInterface(NetworkInterface networkInterface) {
        this.existingPrimaryNetworkInterfaceToAssociate = networkInterface;
        return this;
    }

    // --- OS image selection. ---

    // User ("stored") Windows image from a VHD URL; enables the VM agent and auto-update.
    @Override
    public VirtualMachineImpl withStoredWindowsImage(String imageUrl) {
        VirtualHardDisk userImageVhd = new VirtualHardDisk();
        userImageVhd.withUri(imageUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().osDisk().withImage(userImageVhd);
        this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.WINDOWS);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        // CreateOption: FromImage requires the admin username/password to be set too.
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    // User ("stored") Linux image from a VHD URL.
    @Override
    public VirtualMachineImpl withStoredLinuxImage(String imageUrl) {
        VirtualHardDisk userImageVhd = new VirtualHardDisk();
        userImageVhd.withUri(imageUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().osDisk().withImage(userImageVhd);
        this.innerModel().storageProfile().osDisk().withOsType(OperatingSystemTypes.LINUX);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        return this;
    }

    @Override
    public VirtualMachineImpl withPopularWindowsImage(KnownWindowsVirtualMachineImage knownImage) {
        return withSpecificWindowsImageVersion(knownImage.imageReference());
    }

    @Override
    public VirtualMachineImpl withPopularLinuxImage(KnownLinuxVirtualMachineImage knownImage) {
        return withSpecificLinuxImageVersion(knownImage.imageReference());
    }

    // Marketplace Windows image by explicit publisher/offer/sku/version reference.
    @Override
    public VirtualMachineImpl withSpecificWindowsImageVersion(ImageReference imageReference) {
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        // (method continues on the next chunk line)
        // (continuation of withSpecificWindowsImageVersion from the previous chunk line)
        this.innerModel().storageProfile().withImageReference(imageReference);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    // Marketplace Linux image; flags the VM as marketplace-Linux (presumably consulted
    // later, e.g. for purchase-plan handling — TODO confirm against the rest of the file).
    @Override
    public VirtualMachineImpl withSpecificLinuxImageVersion(ImageReference imageReference) {
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReference);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        this.isMarketplaceLinuxImage = true;
        return this;
    }

    // "latest" pins the image to the newest published version at deployment time.
    @Override
    public VirtualMachineImpl withLatestWindowsImage(String publisher, String offer, String sku) {
        ImageReference imageReference = new ImageReference();
        imageReference.withPublisher(publisher);
        imageReference.withOffer(offer);
        imageReference.withSku(sku);
        imageReference.withVersion("latest");
        return withSpecificWindowsImageVersion(imageReference);
    }

    @Override
    public VirtualMachineImpl withLatestLinuxImage(String publisher, String offer, String sku) {
        ImageReference imageReference = new ImageReference();
        imageReference.withPublisher(publisher);
        imageReference.withOffer(offer);
        imageReference.withSku(sku);
        imageReference.withVersion("latest");
        return withSpecificLinuxImageVersion(imageReference);
    }

    // Generalized custom Windows image referenced by resource id.
    @Override
    public VirtualMachineImpl withGeneralizedWindowsCustomImage(String customImageId) {
        ImageReference imageReferenceInner = new ImageReference();
        imageReferenceInner.withId(customImageId);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReferenceInner);
        this.innerModel().osProfile().withWindowsConfiguration(new WindowsConfiguration());
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(true);
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(true);
        return this;
    }

    // Specialized = same as generalized, but the OS profile is dropped at create time
    // (removeOsProfile) since a specialized image carries its own OS state.
    @Override
    public VirtualMachineImpl withSpecializedWindowsCustomImage(String customImageId) {
        this.withGeneralizedWindowsCustomImage(customImageId);
        this.removeOsProfile = true;
        return this;
    }

    // Gallery image versions reuse the custom-image paths (both are id-referenced).
    @Override
    public VirtualMachineImpl withGeneralizedWindowsGalleryImageVersion(String galleryImageVersionId) {
        return this.withGeneralizedWindowsCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withSpecializedWindowsGalleryImageVersion(String galleryImageVersionId) {
        return this.withSpecializedWindowsCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withGeneralizedLinuxCustomImage(String customImageId) {
        ImageReference imageReferenceInner = new ImageReference();
        imageReferenceInner.withId(customImageId);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.FROM_IMAGE);
        this.innerModel().storageProfile().withImageReference(imageReferenceInner);
        this.innerModel().osProfile().withLinuxConfiguration(new LinuxConfiguration());
        this.isMarketplaceLinuxImage = true;
        return this;
    }

    @Override
    public VirtualMachineImpl withSpecializedLinuxCustomImage(String customImageId) {
        this.withGeneralizedLinuxCustomImage(customImageId);
        this.removeOsProfile = true;
        return this;
    }

    @Override
    public VirtualMachineImpl withGeneralizedLinuxGalleryImageVersion(String galleryImageVersionId) {
        return this.withGeneralizedLinuxCustomImage(galleryImageVersionId);
    }

    @Override
    public VirtualMachineImpl withSpecializedLinuxGalleryImageVersion(String galleryImageVersionId) {
        return this.withSpecializedLinuxCustomImage(galleryImageVersionId);
    }

    // Attach an existing specialized OS VHD (unmanaged disk).
    @Override
    public VirtualMachineImpl withSpecializedOSUnmanagedDisk(String osDiskUrl, OperatingSystemTypes osType) {
        VirtualHardDisk osVhd = new VirtualHardDisk();
        osVhd.withUri(osDiskUrl);
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH);
        // (method continues on the next chunk line)
        // (continuation of withSpecializedOSUnmanagedDisk from the previous chunk line)
        this.innerModel().storageProfile().osDisk().withVhd(osVhd);
        this.innerModel().storageProfile().osDisk().withOsType(osType);
        // Clear the managed-disk reference — VHD (unmanaged) and managed disk are exclusive.
        this.innerModel().storageProfile().osDisk().withManagedDisk(null);
        return this;
    }

    // Attach an existing specialized managed OS disk; clears the VHD reference (exclusive).
    @Override
    public VirtualMachineImpl withSpecializedOSDisk(Disk disk, OperatingSystemTypes osType) {
        ManagedDiskParameters diskParametersInner = new ManagedDiskParameters();
        diskParametersInner.withId(disk.id());
        this.innerModel().storageProfile().osDisk().withCreateOption(DiskCreateOptionTypes.ATTACH);
        this.innerModel().storageProfile().osDisk().withManagedDisk(diskParametersInner);
        this.innerModel().storageProfile().osDisk().withOsType(osType);
        this.innerModel().storageProfile().osDisk().withVhd(null);
        return this;
    }

    // --- OS credentials: "root" (Linux) and "admin" (Windows) variants share the same field. ---

    @Override
    public VirtualMachineImpl withRootUsername(String rootUserName) {
        this.innerModel().osProfile().withAdminUsername(rootUserName);
        return this;
    }

    @Override
    public VirtualMachineImpl withAdminUsername(String adminUserName) {
        this.innerModel().osProfile().withAdminUsername(adminUserName);
        return this;
    }

    // Adds an SSH public key for the admin user, creating the SSH configuration on
    // first use. NOTE(review): assumes linuxConfiguration() is already non-null (i.e. a
    // Linux image setter ran before this) — TODO confirm ordering is enforced by the
    // fluent interface stages.
    @Override
    public VirtualMachineImpl withSsh(String publicKeyData) {
        OSProfile osProfile = this.innerModel().osProfile();
        if (osProfile.linuxConfiguration().ssh() == null) {
            SshConfiguration sshConfiguration = new SshConfiguration();
            sshConfiguration.withPublicKeys(new ArrayList<SshPublicKey>());
            osProfile.linuxConfiguration().withSsh(sshConfiguration);
        }
        SshPublicKey sshPublicKey = new SshPublicKey();
        sshPublicKey.withKeyData(publicKeyData);
        // Conventional authorized_keys path for the configured admin user.
        sshPublicKey.withPath("/home/" + osProfile.adminUsername() + "/.ssh/authorized_keys");
        osProfile.linuxConfiguration().ssh().publicKeys().add(sshPublicKey);
        return this;
    }

    // --- Windows-specific options (assume windowsConfiguration() was set by an image setter). ---

    @Override
    public VirtualMachineImpl withoutVMAgent() {
        this.innerModel().osProfile().windowsConfiguration().withProvisionVMAgent(false);
        return this;
    }

    @Override
    public VirtualMachineImpl withoutAutoUpdate() {
        this.innerModel().osProfile().windowsConfiguration().withEnableAutomaticUpdates(false);
        return this;
    }

    @Override
    public
        VirtualMachineImpl withTimeZone(String timeZone) {
        this.innerModel().osProfile().windowsConfiguration().withTimeZone(timeZone);
        return this;
    }

    // Adds a WinRM listener, creating the WinRM configuration on first use.
    // NOTE(review): a freshly constructed WinRMConfiguration may have listeners() == null,
    // in which case .listeners().add(...) would NPE — TODO verify the model's default.
    @Override
    public VirtualMachineImpl withWinRM(WinRMListener listener) {
        if (this.innerModel().osProfile().windowsConfiguration().winRM() == null) {
            WinRMConfiguration winRMConfiguration = new WinRMConfiguration();
            this.innerModel().osProfile().windowsConfiguration().withWinRM(winRMConfiguration);
        }
        this.innerModel().osProfile().windowsConfiguration().winRM().listeners().add(listener);
        return this;
    }

    @Override
    public VirtualMachineImpl withRootPassword(String password) {
        this.innerModel().osProfile().withAdminPassword(password);
        return this;
    }

    @Override
    public VirtualMachineImpl withAdminPassword(String password) {
        this.innerModel().osProfile().withAdminPassword(password);
        return this;
    }

    // Custom data is cloud-init/user data passed through the OS profile (base64-encoded).
    @Override
    public VirtualMachineImpl withCustomData(String base64EncodedCustomData) {
        this.innerModel().osProfile().withCustomData(base64EncodedCustomData);
        return this;
    }

    // User data lives on the VM resource itself, not the OS profile.
    @Override
    public VirtualMachineImpl withUserData(String base64EncodedUserData) {
        this.innerModel().withUserData(base64EncodedUserData);
        return this;
    }

    @Override
    public VirtualMachineImpl withComputerName(String computerName) {
        this.innerModel().osProfile().withComputerName(computerName);
        return this;
    }

    @Override
    public VirtualMachineImpl withSize(String sizeName) {
        this.innerModel().hardwareProfile().withVmSize(VirtualMachineSizeTypes.fromString(sizeName));
        return this;
    }

    @Override
    public VirtualMachineImpl withSize(VirtualMachineSizeTypes size) {
        this.innerModel().hardwareProfile().withVmSize(size);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskCaching(CachingTypes cachingType) {
        this.innerModel().storageProfile().osDisk().withCaching(cachingType);
        return this;
    }

    // Chooses where the (unmanaged) OS disk VHD is placed; no-op for managed disks.
    @Override
    public VirtualMachineImpl withOSDiskVhdLocation(String containerName, String vhdName) {
        if (isManagedDiskEnabled()) {
            return this;
        }
        StorageProfile storageProfile =
        // (method continues on the next chunk line)
            // (continuation of withOSDiskVhdLocation from the previous chunk line)
            this.innerModel().storageProfile();
        OSDisk osDisk = storageProfile.osDisk();
        // Only meaningful when the OS disk comes from an image; custom images manage
        // their own target, so those are skipped too.
        if (!this.isOSDiskFromImage(osDisk)) {
            return this;
        }
        if (this.isOsDiskFromCustomImage(storageProfile)) {
            return this;
        }
        if (this.isOSDiskFromPlatformImage(storageProfile)) {
            // Platform image: target URL is derived from a (possibly pending) storage account.
            VirtualHardDisk osVhd = new VirtualHardDisk();
            osVhd.withUri(temporaryBlobUrl(containerName, vhdName));
            this.innerModel().storageProfile().osDisk().withVhd(osVhd);
            return this;
        }
        if (this.isOSDiskFromStoredImage(storageProfile)) {
            // Stored image: place the OS VHD in the same storage account as the source
            // image, under the requested container/name.
            VirtualHardDisk osVhd = new VirtualHardDisk();
            try {
                URL sourceCustomImageUrl = new URL(osDisk.image().uri());
                URL destinationVhdUrl =
                    new URL(
                        sourceCustomImageUrl.getProtocol(),
                        sourceCustomImageUrl.getHost(),
                        "/" + containerName + "/" + vhdName);
                osVhd.withUri(destinationVhdUrl.toString());
            } catch (MalformedURLException ex) {
                throw logger.logExceptionAsError(new RuntimeException(ex));
            }
            this.innerModel().storageProfile().osDisk().withVhd(osVhd);
        }
        return this;
    }

    // Lazily creates the managed-disk parameters before setting the account type.
    @Override
    public VirtualMachineImpl withOSDiskStorageAccountType(StorageAccountTypes accountType) {
        if (this.innerModel().storageProfile().osDisk().managedDisk() == null) {
            this.innerModel().storageProfile().osDisk().withManagedDisk(new ManagedDiskParameters());
        }
        this.innerModel().storageProfile().osDisk().managedDisk().withStorageAccountType(accountType);
        return this;
    }

    // --- Defaults applied to managed data disks collected in managedDataDisks. ---

    @Override
    public VirtualMachineImpl withDataDiskDefaultCachingType(CachingTypes cachingType) {
        this.managedDataDisks.setDefaultCachingType(cachingType);
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultStorageAccountType(StorageAccountTypes storageAccountType) {
        this.managedDataDisks.setDefaultStorageAccountType(storageAccountType);
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultDeleteOptions(DeleteOptions deleteOptions) {
        // Translate the fluent-API DeleteOptions into the wire-model DiskDeleteOptionTypes.
        this.managedDataDisks.setDefaultDeleteOptions(diskDeleteOptionsFromDeleteOptions(deleteOptions));
        return this;
    }

    @Override
    public VirtualMachineImpl withDataDiskDefaultDiskEncryptionSet(
        String
            diskEncryptionSetId) {
        this.managedDataDisks.setDefaultEncryptionSet(diskEncryptionSetId);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskEncryptionSettings(DiskEncryptionSettings settings) {
        this.innerModel().storageProfile().osDisk().withEncryptionSettings(settings);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskSizeInGB(int size) {
        this.innerModel().storageProfile().osDisk().withDiskSizeGB(size);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskName(String name) {
        this.innerModel().storageProfile().osDisk().withName(name);
        return this;
    }

    @Override
    public VirtualMachineImpl withOSDiskDeleteOptions(DeleteOptions deleteOptions) {
        this.innerModel().storageProfile().osDisk()
            .withDeleteOption(DiskDeleteOptionTypes.fromString(deleteOptions.toString()));
        return this;
    }

    // Sets the OS disk's encryption set id, lazily creating both intermediate models.
    @Override
    public VirtualMachineImpl withOSDiskDiskEncryptionSet(String diskEncryptionSetId) {
        if (this.innerModel().storageProfile().osDisk().managedDisk() == null) {
            this.innerModel().storageProfile().osDisk()
                .withManagedDisk(new ManagedDiskParameters());
        }
        if (this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) {
            this.innerModel().storageProfile().osDisk().managedDisk()
                .withDiskEncryptionSet(new DiskEncryptionSetParameters());
        }
        this.innerModel().storageProfile().osDisk().managedDisk().diskEncryptionSet().withId(diskEncryptionSetId);
        return this;
    }

    // Ephemeral OS disk (local diff-disk); the service requires read-only caching.
    @Override
    public VirtualMachineImpl withEphemeralOSDisk() {
        if (this.innerModel().storageProfile().osDisk().diffDiskSettings() == null) {
            this.innerModel().storageProfile().osDisk().withDiffDiskSettings(new DiffDiskSettings());
        }
        this.innerModel().storageProfile().osDisk().diffDiskSettings().withOption(DiffDiskOptions.LOCAL);
        withOSDiskCaching(CachingTypes.READ_ONLY);
        return this;
    }

    // --- Unmanaged data disks (mutually exclusive with managed disks). ---

    @Override
    public UnmanagedDataDiskImpl defineUnmanagedDataDisk(String name) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return
        // (statement continues on the next chunk line)
            // (completes defineUnmanagedDataDisk's return from the previous chunk line)
            UnmanagedDataDiskImpl.prepareDataDisk(name, this);
    }

    @Override
    public VirtualMachineImpl withNewUnmanagedDataDisk(Integer sizeInGB) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return defineUnmanagedDataDisk(null).withNewVhd(sizeInGB).attach();
    }

    @Override
    public VirtualMachineImpl withExistingUnmanagedDataDisk(
        String storageAccountName, String containerName, String vhdName) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_BOTH_MANAGED_AND_UNMANAGED_DISK_NOT_ALLOWED);
        return defineUnmanagedDataDisk(null).withExistingVhd(storageAccountName, containerName, vhdName).attach();
    }

    // Removes the unmanaged data disk with the given name from both the wrapper list
    // and the inner model. NOTE(review): removal uses the same index for both lists,
    // assuming they stay position-aligned — TODO confirm that invariant. The break
    // right after remove() makes the in-loop mutation safe.
    @Override
    public VirtualMachineImpl withoutUnmanagedDataDisk(String name) {
        int idx = -1;
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            idx++;
            if (dataDisk.name().equalsIgnoreCase(name)) {
                this.unmanagedDataDisks.remove(idx);
                this.innerModel().storageProfile().dataDisks().remove(idx);
                break;
            }
        }
        return this;
    }

    // Same as above, keyed by LUN instead of name.
    @Override
    public VirtualMachineImpl withoutUnmanagedDataDisk(int lun) {
        int idx = -1;
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            idx++;
            if (dataDisk.lun() == lun) {
                this.unmanagedDataDisks.remove(idx);
                this.innerModel().storageProfile().dataDisks().remove(idx);
                break;
            }
        }
        return this;
    }

    @Override
    public UnmanagedDataDiskImpl updateUnmanagedDataDisk(String name) {
        throwIfManagedDiskEnabled(ManagedUnmanagedDiskErrors.VM_NO_UNMANAGED_DISK_TO_UPDATE);
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            if (dataDisk.name().equalsIgnoreCase(name)) {
                return (UnmanagedDataDiskImpl) dataDisk;
            }
        }
        throw logger.logExceptionAsError(new RuntimeException("A data disk with name '" + name + "' not found"));
    }

    // --- Managed data disks: requests are queued on managedDataDisks and applied at
    // create/update time. A LUN of -1 means "auto-assign". ---

    @Override
    public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this.managedDataDisks.newDisksToAttach.put(this.addDependency(creatable), new DataDisk().withLun(-1));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(Creatable<Disk> creatable, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this
            .managedDataDisks
            .newDisksToAttach
            .put(this.addDependency(creatable), new DataDisk().withLun(lun).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this.managedDataDisks.implicitDisksToAssociate.add(new DataDisk().withLun(-1).withDiskSizeGB(sizeInGB));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(new DataDisk().withLun(lun).withDiskSizeGB(sizeInGB).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDisk(
        int sizeInGB, int lun, CachingTypes cachingType, StorageAccountTypes storageAccountType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(storageAccountType);
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(sizeInGB)
                    .withCaching(cachingType)
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    // Options-based overload: managed-disk parameters are built only when an account
    // type or an encryption set was actually configured.
    @Override
    public VirtualMachineImpl withNewDataDisk(int sizeInGB, int lun, VirtualMachineDiskOptions options) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = null;
        if
        // (condition continues on the next chunk line)
            // (completes the if-condition of withNewDataDisk(int,int,VirtualMachineDiskOptions))
            (options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters = new ManagedDiskParameters();
            managedDiskParameters.withStorageAccountType(options.storageAccountType());
            if (options.isDiskEncryptionSetConfigured()) {
                managedDiskParameters.withDiskEncryptionSet(
                    new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
            }
        }
        this
            .managedDataDisks
            .implicitDisksToAssociate
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(sizeInGB)
                    .withCaching(options.cachingTypes())
                    .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    // --- Attaching already-existing managed disks (LUN -1 = auto-assign). ---

    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(new DataDisk().withLun(-1).withManagedDisk(managedDiskParameters));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(new DataDisk().withLun(lun).withManagedDisk(managedDiskParameters).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(Disk disk, int newSizeInGB, int lun, CachingTypes cachingType) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(newSizeInGB)
                    .withManagedDisk(managedDiskParameters)
                    .withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withExistingDataDisk(
        Disk disk, int newSizeInGB, int lun, VirtualMachineDiskOptions options) {
        throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withId(disk.id());
        if (options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters.withDiskEncryptionSet(
                new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
        }
        this
            .managedDataDisks
            .existingDisksToAttach
            .add(
                new DataDisk()
                    .withLun(lun)
                    .withDiskSizeGB(newSizeInGB)
                    .withCaching(options.cachingTypes())
                    .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                    .withManagedDisk(managedDiskParameters));
        return this;
    }

    // --- Data disks inherited from the VM image, addressed by the image's LUN. ---

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(int imageLun) {
        this.managedDataDisks.newDisksFromImage.add(new DataDisk().withLun(imageLun));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(int imageLun, int newSizeInGB, CachingTypes cachingType) {
        this
            .managedDataDisks
            .newDisksFromImage
            .add(new DataDisk().withLun(imageLun).withDiskSizeGB(newSizeInGB).withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(
        int imageLun, int newSizeInGB, CachingTypes cachingType, StorageAccountTypes storageAccountType) {
        ManagedDiskParameters managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(storageAccountType);
        this
            .managedDataDisks
            .newDisksFromImage
            .add(
                new DataDisk()
                    .withLun(imageLun)
                    .withDiskSizeGB(newSizeInGB)
                    .withManagedDisk(managedDiskParameters)
                    .withCaching(cachingType));
        return this;
    }

    @Override
    public VirtualMachineImpl withNewDataDiskFromImage(
        int imageLun, int newSizeInGB, VirtualMachineDiskOptions options) {
        // (method body continues on the next chunk line)
    // (body of withNewDataDiskFromImage(int, int, VirtualMachineDiskOptions) begun above)
    throwIfManagedDiskDisabled(ManagedUnmanagedDiskErrors.VM_BOTH_UNMANAGED_AND_MANAGED_DISK_NOT_ALLOWED);
    // Leave ManagedDiskParameters null unless the options actually carry settings.
    ManagedDiskParameters managedDiskParameters = null;
    if (options.storageAccountType() != null || options.isDiskEncryptionSetConfigured()) {
        managedDiskParameters = new ManagedDiskParameters();
        managedDiskParameters.withStorageAccountType(options.storageAccountType());
        if (options.isDiskEncryptionSetConfigured()) {
            managedDiskParameters.withDiskEncryptionSet(
                new DiskEncryptionSetParameters().withId(options.diskEncryptionSetId()));
        }
    }
    // NOTE(review): the other withNewDataDiskFromImage overloads add to
    // managedDataDisks.newDisksFromImage; this one adds to implicitDisksToAssociate —
    // confirm this is intentional and not a copy/paste slip.
    this
        .managedDataDisks
        .implicitDisksToAssociate
        .add(
            new DataDisk()
                .withLun(imageLun)
                .withDiskSizeGB(newSizeInGB)
                .withCaching(options.cachingTypes())
                .withDeleteOption(diskDeleteOptionsFromDeleteOptions(options.deleteOptions()))
                .withManagedDisk(managedDiskParameters));
    return this;
}

/** Schedules removal of the managed data disk at the given LUN (no-op for unmanaged-disk VMs). */
@Override
public VirtualMachineImpl withoutDataDisk(int lun) {
    if (!isManagedDiskEnabled()) {
        return this;
    }
    this.managedDataDisks.diskLunsToRemove.add(lun);
    return this;
}

/** Registers a creatable storage account dependency; only the first registration wins. */
@Override
public VirtualMachineImpl withNewStorageAccount(Creatable<StorageAccount> creatable) {
    if (this.creatableStorageAccountKey == null) {
        this.creatableStorageAccountKey = this.addDependency(creatable);
    }
    return this;
}

/**
 * Defines a new storage account in this VM's region (and new or existing resource group),
 * then delegates to the creatable overload.
 */
@Override
public VirtualMachineImpl withNewStorageAccount(String name) {
    StorageAccount.DefinitionStages.WithGroup definitionWithGroup =
        this.storageManager.storageAccounts().define(name).withRegion(this.regionName());
    Creatable<StorageAccount> definitionAfterGroup;
    if (this.creatableGroup != null) {
        definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
    } else {
        definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
    }
    return withNewStorageAccount(definitionAfterGroup);
}

/** Associates an existing storage account (used for unmanaged disk VHDs). */
@Override
public VirtualMachineImpl withExistingStorageAccount(StorageAccount storageAccount) {
    this.existingStorageAccountToAssociate = storageAccount;
    return this;
}

// (withNewAvailabilitySet(Creatable) — signature split across chunk lines)
@Override
public VirtualMachineImpl
withNewAvailabilitySet(Creatable<AvailabilitySet> creatable) {
    // Only the first availability-set dependency is kept.
    if (this.creatableAvailabilitySetKey == null) {
        this.creatableAvailabilitySetKey = this.addDependency(creatable);
    }
    return this;
}

/** Points the VM at an existing proximity placement group by resource id. */
@Override
public VirtualMachineImpl withProximityPlacementGroup(String proximityPlacementGroupId) {
    this.innerModel().withProximityPlacementGroup(new SubResource().withId(proximityPlacementGroupId));
    // An explicit id cancels any pending "create new PPG" request.
    newProximityPlacementGroupName = null;
    return this;
}

/** Requests creation of a new proximity placement group during apply; clears any current association. */
@Override
public VirtualMachineImpl withNewProximityPlacementGroup(
    String proximityPlacementGroupName, ProximityPlacementGroupType type) {
    this.newProximityPlacementGroupName = proximityPlacementGroupName;
    this.newProximityPlacementGroupType = type;
    this.innerModel().withProximityPlacementGroup(null);
    return this;
}

/** Clears any proximity placement group association. */
@Override
public VirtualMachineImpl withoutProximityPlacementGroup() {
    this.innerModel().withProximityPlacementGroup(null);
    return this;
}

/**
 * Defines a new availability set in this VM's region/group; SKU is ALIGNED when the VM
 * uses managed disks, CLASSIC otherwise.
 */
@Override
public VirtualMachineImpl withNewAvailabilitySet(String name) {
    AvailabilitySet.DefinitionStages.WithGroup definitionWithGroup =
        super.myManager.availabilitySets().define(name).withRegion(this.regionName());
    AvailabilitySet.DefinitionStages.WithSku definitionWithSku;
    if (this.creatableGroup != null) {
        definitionWithSku = definitionWithGroup.withNewResourceGroup(this.creatableGroup);
    } else {
        definitionWithSku = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName());
    }
    Creatable<AvailabilitySet> creatable;
    if (isManagedDiskEnabled()) {
        creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.ALIGNED);
    } else {
        creatable = definitionWithSku.withSku(AvailabilitySetSkuTypes.CLASSIC);
    }
    return withNewAvailabilitySet(creatable);
}

/** Associates an existing availability set. */
@Override
public VirtualMachineImpl withExistingAvailabilitySet(AvailabilitySet availabilitySet) {
    this.existingAvailabilitySetToAssociate = availabilitySet;
    return this;
}

// (body continues on the next chunk line)
@Override
public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable) {
    // (body of withNewSecondaryNetworkInterface(Creatable) begun above)
    this.creatableSecondaryNetworkInterfaceKeys.add(this.addDependency(creatable));
    return this;
}

/** Secondary NIC from a creatable, remembering its delete option keyed by the dependency key. */
@Override
public VirtualMachineImpl withNewSecondaryNetworkInterface(Creatable<NetworkInterface> creatable, DeleteOptions deleteOptions) {
    String key = this.addDependency(creatable);
    this.creatableSecondaryNetworkInterfaceKeys.add(key);
    if (deleteOptions != null) {
        this.secondaryNetworkInterfaceDeleteOptions.put(key, deleteOptions);
    }
    return this;
}

/** Associates an existing secondary NIC. */
@Override
public VirtualMachineImpl withExistingSecondaryNetworkInterface(NetworkInterface networkInterface) {
    this.existingSecondaryNetworkInterfacesToAssociate.add(networkInterface);
    return this;
}

/** Starts the definition of a new VM extension. */
@Override
public VirtualMachineExtensionImpl defineNewExtension(String name) {
    return this.virtualMachineExtensions.define(name);
}

/** Removes the first non-primary NIC reference whose resource name matches (case-insensitive). */
@Override
public VirtualMachineImpl withoutSecondaryNetworkInterface(String name) {
    if (this.innerModel().networkProfile() != null
        && this.innerModel().networkProfile().networkInterfaces() != null) {
        int idx = -1;
        for (NetworkInterfaceReference nicReference : this.innerModel().networkProfile().networkInterfaces()) {
            idx++;
            if (!nicReference.primary()
                && name.equalsIgnoreCase(ResourceUtils.nameFromResourceId(nicReference.id()))) {
                this.innerModel().networkProfile().networkInterfaces().remove(idx);
                break;
            }
        }
    }
    return this;
}

/** Starts an update of an existing extension. */
@Override
public VirtualMachineExtensionImpl updateExtension(String name) {
    return this.virtualMachineExtensions.update(name);
}

/** Removes an extension by name. */
@Override
public VirtualMachineImpl withoutExtension(String name) {
    this.virtualMachineExtensions.remove(name);
    return this;
}

/** Sets the marketplace purchase plan (publisher/product/name). */
@Override
public VirtualMachineImpl withPlan(PurchasePlan plan) {
    this.innerModel().withPlan(new Plan());
    this.innerModel().plan().withPublisher(plan.publisher()).withProduct(plan.product()).withName(plan.name());
    return this;
}

/** Purchase plan plus a promotion code. */
@Override
public VirtualMachineImpl withPromotionalPlan(PurchasePlan plan, String promotionCode) {
    this.withPlan(plan);
    this.innerModel().plan().withPromotionCode(promotionCode);
    return this;
}

// (method continues on the next chunk line)
@Override
/** Opts this VM into unmanaged (VHD-based) disks. */
public VirtualMachineImpl withUnmanagedDisks() {
    this.isUnmanagedDiskSelected = true;
    return this;
}

/** Enables boot diagnostics backed by a platform-managed storage account. */
@Override
public VirtualMachineImpl withBootDiagnosticsOnManagedStorageAccount() {
    this.bootDiagnosticsHandler.withBootDiagnostics(true);
    return this;
}

/** Enables boot diagnostics; the handler resolves a (non-managed) storage account. */
@Override
public VirtualMachineImpl withBootDiagnostics() {
    this.bootDiagnosticsHandler.withBootDiagnostics(false);
    return this;
}

/** Boot diagnostics backed by a storage account that will be created with the VM. */
@Override
public VirtualMachineImpl withBootDiagnostics(Creatable<StorageAccount> creatable) {
    this.bootDiagnosticsHandler.withBootDiagnostics(creatable);
    return this;
}

/** Boot diagnostics writing to the given blob endpoint URI. */
@Override
public VirtualMachineImpl withBootDiagnostics(String storageAccountBlobEndpointUri) {
    this.bootDiagnosticsHandler.withBootDiagnostics(storageAccountBlobEndpointUri);
    return this;
}

/** Boot diagnostics backed by an existing storage account. */
@Override
public VirtualMachineImpl withBootDiagnostics(StorageAccount storageAccount) {
    this.bootDiagnosticsHandler.withBootDiagnostics(storageAccount);
    return this;
}

/** Disables boot diagnostics. */
@Override
public VirtualMachineImpl withoutBootDiagnostics() {
    this.bootDiagnosticsHandler.withoutBootDiagnostics();
    return this;
}

/** Sets the VM priority (Regular / Low / Spot). */
@Override
public VirtualMachineImpl withPriority(VirtualMachinePriorityTypes priority) {
    this.innerModel().withPriority(priority);
    return this;
}

@Override
public VirtualMachineImpl withLowPriority() {
    this.withPriority(VirtualMachinePriorityTypes.LOW);
    return this;
}

/** Low priority with an explicit eviction policy. */
@Override
public VirtualMachineImpl withLowPriority(VirtualMachineEvictionPolicyTypes policy) {
    this.withLowPriority();
    this.innerModel().withEvictionPolicy(policy);
    return this;
}

@Override
public VirtualMachineImpl withSpotPriority() {
    this.withPriority(VirtualMachinePriorityTypes.SPOT);
    return this;
}

/** Spot priority with an explicit eviction policy. */
@Override
public VirtualMachineImpl withSpotPriority(VirtualMachineEvictionPolicyTypes policy) {
    this.withSpotPriority();
    this.innerModel().withEvictionPolicy(policy);
    return this;
}

/** Maximum billing price for Spot/Low-priority VMs (replaces the whole billing profile). */
@Override
public VirtualMachineImpl withMaxPrice(Double maxPrice) {
    this.innerModel().withBillingProfile(new BillingProfile().withMaxPrice(maxPrice));
    return this;
}

// (method continues on the next chunk line)
@Override
public
/** Enables the system-assigned (local) managed service identity. */
VirtualMachineImpl withSystemAssignedManagedServiceIdentity() {
    this.virtualMachineMsiHandler.withLocalManagedServiceIdentity();
    return this;
}

/** Removes the system-assigned managed service identity. */
@Override
public VirtualMachineImpl withoutSystemAssignedManagedServiceIdentity() {
    this.virtualMachineMsiHandler.withoutLocalManagedServiceIdentity();
    return this;
}

/** Grants the system-assigned identity a built-in role on the given resource. */
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, BuiltInRole role) {
    this.virtualMachineMsiHandler.withAccessTo(resourceId, role);
    return this;
}

/** Grants the system-assigned identity a built-in role on the VM's resource group. */
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(BuiltInRole role) {
    this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(role);
    return this;
}

/** Grants the system-assigned identity a role (by definition id) on the given resource. */
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessTo(String resourceId, String roleDefinitionId) {
    this.virtualMachineMsiHandler.withAccessTo(resourceId, roleDefinitionId);
    return this;
}

/** Grants the system-assigned identity a role (by definition id) on the VM's resource group. */
@Override
public VirtualMachineImpl withSystemAssignedIdentityBasedAccessToCurrentResourceGroup(String roleDefinitionId) {
    this.virtualMachineMsiHandler.withAccessToCurrentResourceGroup(roleDefinitionId);
    return this;
}

/** Attaches a user-assigned identity that will be created with the VM. */
@Override
public VirtualMachineImpl withNewUserAssignedManagedServiceIdentity(Creatable<Identity> creatableIdentity) {
    this.virtualMachineMsiHandler.withNewExternalManagedServiceIdentity(creatableIdentity);
    return this;
}

/** Attaches an existing user-assigned identity. */
@Override
public VirtualMachineImpl withExistingUserAssignedManagedServiceIdentity(Identity identity) {
    this.virtualMachineMsiHandler.withExistingExternalManagedServiceIdentity(identity);
    return this;
}

/** Detaches a user-assigned identity by id. */
@Override
public VirtualMachineImpl withoutUserAssignedManagedServiceIdentity(String identityId) {
    this.virtualMachineMsiHandler.withoutExternalManagedServiceIdentity(identityId);
    return this;
}

/** Sets the license type (e.g. for Azure Hybrid Benefit). */
@Override
public VirtualMachineImpl withLicenseType(String licenseType) {
    innerModel().withLicenseType(licenseType);
    return this;
}

// (enableHibernation continues on the next chunk line)
@Override
public VirtualMachineImpl enableHibernation() {
    if (this.innerModel().additionalCapabilities() ==
        // (tail of enableHibernation begun above — lazily create the capabilities object)
        null) {
        this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities());
    }
    this.innerModel().additionalCapabilities().withHibernationEnabled(true);
    return this;
}

/** Explicitly disables hibernation, creating the capabilities object if needed. */
@Override
public VirtualMachineImpl disableHibernation() {
    if (this.innerModel().additionalCapabilities() == null) {
        this.innerModel().withAdditionalCapabilities(new AdditionalCapabilities());
    }
    this.innerModel().additionalCapabilities().withHibernationEnabled(false);
    return this;
}

/**
 * Infers whether this VM uses managed disks from the storage profile: custom-image or
 * attached-managed OS disks mean managed; stored-image or attached-unmanaged mean
 * unmanaged; platform-image defers to the explicit unmanaged selection; otherwise
 * create-mode defaults to managed and update-mode checks for a VHD.
 */
@Override
public boolean isManagedDiskEnabled() {
    if (isOsDiskFromCustomImage(this.innerModel().storageProfile())) {
        return true;
    }
    if (isOSDiskAttachedManaged(this.innerModel().storageProfile().osDisk())) {
        return true;
    }
    if (isOSDiskFromStoredImage(this.innerModel().storageProfile())) {
        return false;
    }
    if (isOSDiskAttachedUnmanaged(this.innerModel().storageProfile().osDisk())) {
        return false;
    }
    if (isOSDiskFromPlatformImage(this.innerModel().storageProfile())) {
        if (this.isUnmanagedDiskSelected) {
            return false;
        }
    }
    if (isInCreateMode()) {
        return true;
    } else {
        return this.innerModel().storageProfile().osDisk().vhd() == null;
    }
}

/** OS computer name, or null when there is no OS profile. */
@Override
public String computerName() {
    if (innerModel().osProfile() == null) {
        return null;
    }
    return innerModel().osProfile().computerName();
}

@Override
public VirtualMachineSizeTypes size() {
    return innerModel().hardwareProfile().vmSize();
}

/** OS type from the OS disk if set, otherwise inferred from the OS profile configuration. */
@Override
public OperatingSystemTypes osType() {
    if (innerModel().storageProfile().osDisk().osType() != null) {
        return innerModel().storageProfile().osDisk().osType();
    }
    if (innerModel().osProfile() != null) {
        if (innerModel().osProfile().linuxConfiguration() != null) {
            return OperatingSystemTypes.LINUX;
        }
        if (innerModel().osProfile().windowsConfiguration() != null) {
            return OperatingSystemTypes.WINDOWS;
        }
    }
    return null;
}

/** URI of the unmanaged OS VHD, or null for managed-disk VMs. */
@Override
public String osUnmanagedDiskVhdUri() {
    if (isManagedDiskEnabled() || this.storageProfile().osDisk().vhd() == null) {
        return null;
    }
    return innerModel().storageProfile().osDisk().vhd().uri();
}

// (method continues on the next chunk line)
@Override
public
/** Caching type of the OS disk. */
CachingTypes osDiskCachingType() {
    return innerModel().storageProfile().osDisk().caching();
}

/** OS disk size in GB (0 when unset). */
@Override
public int osDiskSize() {
    return ResourceManagerUtils.toPrimitiveInt(innerModel().storageProfile().osDisk().diskSizeGB());
}

/** Storage account type of the managed OS disk, or null for unmanaged VMs. */
@Override
public StorageAccountTypes osDiskStorageAccountType() {
    if (!isManagedDiskEnabled() || this.storageProfile().osDisk().managedDisk() == null) {
        return null;
    }
    return this.storageProfile().osDisk().managedDisk().storageAccountType();
}

/** Resource id of the managed OS disk, or null for unmanaged VMs. */
@Override
public String osDiskId() {
    if (!isManagedDiskEnabled()) {
        return null;
    }
    return this.storageProfile().osDisk().managedDisk().id();
}

/** Delete option of the managed OS disk, or null when not applicable. */
@Override
public DeleteOptions osDiskDeleteOptions() {
    if (!isManagedDiskEnabled() || this.storageProfile().osDisk().deleteOption() == null) {
        return null;
    }
    return DeleteOptions.fromString(this.storageProfile().osDisk().deleteOption().toString());
}

/** Disk-encryption-set id of the managed OS disk, or null when not configured. */
@Override
public String osDiskDiskEncryptionSetId() {
    if (!isManagedDiskEnabled()
        || this.storageProfile().osDisk().managedDisk() == null
        || this.storageProfile().osDisk().managedDisk().diskEncryptionSet() == null) {
        return null;
    }
    return this.storageProfile().osDisk().managedDisk().diskEncryptionSet().id();
}

/** True when the OS disk uses ephemeral (diff-disk) placement. */
@Override
public boolean isOSDiskEphemeral() {
    return this.storageProfile().osDisk().diffDiskSettings() != null
        && this.storageProfile().osDisk().diffDiskSettings().placement() != null;
}

/** Unmanaged data disks keyed by LUN; empty for managed-disk VMs. */
@Override
public Map<Integer, VirtualMachineUnmanagedDataDisk> unmanagedDataDisks() {
    Map<Integer, VirtualMachineUnmanagedDataDisk> dataDisks = new HashMap<>();
    if (!isManagedDiskEnabled()) {
        for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) {
            dataDisks.put(dataDisk.lun(), dataDisk);
        }
    }
    return Collections.unmodifiableMap(dataDisks);
}

/** Managed data disks keyed by LUN; empty for unmanaged VMs. (Body continues on the next chunk line.) */
@Override
public Map<Integer, VirtualMachineDataDisk> dataDisks() {
    Map<Integer, VirtualMachineDataDisk> dataDisks = new HashMap<>();
    if (isManagedDiskEnabled()) {
        List<DataDisk> innerDataDisks = this.innerModel().storageProfile().dataDisks();
        if (innerDataDisks != null)
        // (tail of dataDisks() begun above)
        {
            for (DataDisk innerDataDisk : innerDataDisks) {
                dataDisks.put(innerDataDisk.lun(), new VirtualMachineDataDiskImpl(innerDataDisk));
            }
        }
    }
    return Collections.unmodifiableMap(dataDisks);
}

/** Blocking fetch of the primary NIC. */
@Override
public NetworkInterface getPrimaryNetworkInterface() {
    return this.getPrimaryNetworkInterfaceAsync().block();
}

@Override
public Mono<NetworkInterface> getPrimaryNetworkInterfaceAsync() {
    return this.networkManager.networkInterfaces().getByIdAsync(primaryNetworkInterfaceId());
}

/** Blocking fetch of the public IP on the primary NIC's primary IP configuration. */
@Override
public PublicIpAddress getPrimaryPublicIPAddress() {
    return this.getPrimaryNetworkInterface().primaryIPConfiguration().getPublicIpAddress();
}

@Override
public String getPrimaryPublicIPAddressId() {
    return this.getPrimaryNetworkInterface().primaryIPConfiguration().publicIpAddressId();
}

/** Ids of all attached NICs, in profile order. */
@Override
public List<String> networkInterfaceIds() {
    List<String> nicIds = new ArrayList<>();
    for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) {
        nicIds.add(nicRef.id());
    }
    return nicIds;
}

/**
 * Id of the primary NIC: the single NIC when there is exactly one, the NIC flagged
 * primary when there are several, falling back to the first NIC if none is flagged.
 */
@Override
public String primaryNetworkInterfaceId() {
    final List<NetworkInterfaceReference> nicRefs = this.innerModel().networkProfile().networkInterfaces();
    String primaryNicRefId = null;
    if (nicRefs.size() == 1) {
        primaryNicRefId = nicRefs.get(0).id();
    } else if (nicRefs.size() == 0) {
        primaryNicRefId = null;
    } else {
        for (NetworkInterfaceReference nicRef : innerModel().networkProfile().networkInterfaces()) {
            if (nicRef.primary() != null && nicRef.primary()) {
                primaryNicRefId = nicRef.id();
                break;
            }
        }
        if (primaryNicRefId == null) {
            primaryNicRefId = nicRefs.get(0).id();
        }
    }
    return primaryNicRefId;
}

@Override
public String availabilitySetId() {
    if (innerModel().availabilitySet() != null) {
        return innerModel().availabilitySet().id();
    }
    return null;
}

@Override
public String virtualMachineScaleSetId() {
    if (innerModel().virtualMachineScaleSet() != null) {
        return innerModel().virtualMachineScaleSet().id();
    }
    return null;
}

// (method continues on the next chunk line)
@Override
public String provisioningState() {
    return
    // (tail of provisioningState() begun above)
    innerModel().provisioningState();
}

@Override
public String licenseType() {
    return innerModel().licenseType();
}

/**
 * Resolves the associated proximity placement group with a synchronous service call,
 * or returns null when none is associated or the group cannot be found.
 */
@Override
public ProximityPlacementGroup proximityPlacementGroup() {
    if (innerModel().proximityPlacementGroup() == null) {
        return null;
    } else {
        ResourceId id = ResourceId.fromString(innerModel().proximityPlacementGroup().id());
        ProximityPlacementGroupInner plgInner =
            manager()
                .serviceClient()
                .getProximityPlacementGroups()
                .getByResourceGroup(id.resourceGroupName(), id.name());
        if (plgInner == null) {
            return null;
        } else {
            return new ProximityPlacementGroupImpl(plgInner);
        }
    }
}

@Override
public Mono<List<VirtualMachineExtension>> listExtensionsAsync() {
    return this.virtualMachineExtensions.listAsync();
}

@Override
public Map<String, VirtualMachineExtension> listExtensions() {
    return this.virtualMachineExtensions.asMap();
}

// Simple pass-throughs to the inner model.
@Override
public Plan plan() {
    return innerModel().plan();
}

@Override
public StorageProfile storageProfile() {
    return innerModel().storageProfile();
}

@Override
public OSProfile osProfile() {
    return innerModel().osProfile();
}

@Override
public DiagnosticsProfile diagnosticsProfile() {
    return innerModel().diagnosticsProfile();
}

@Override
public String vmId() {
    return innerModel().vmId();
}

/** Lazily fetched instance view; refreshed on first access. */
@Override
public VirtualMachineInstanceView instanceView() {
    if (this.virtualMachineInstanceView == null) {
        this.refreshInstanceView();
    }
    return this.virtualMachineInstanceView;
}

/** Availability zone ids parsed from the inner model's zone strings. */
@Override
public Set<AvailabilityZoneId> availabilityZones() {
    Set<AvailabilityZoneId> zones = new HashSet<>();
    if (this.innerModel().zones() != null) {
        for (String zone : this.innerModel().zones()) {
            zones.add(AvailabilityZoneId.fromString(zone));
        }
    }
    return Collections.unmodifiableSet(zones);
}

/** Power state derived from the (possibly freshly fetched) instance view. */
@Override
public PowerState powerState() {
    return PowerState.fromInstanceView(this.instanceView());
}

@Override
public boolean isBootDiagnosticsEnabled() {
    return this.bootDiagnosticsHandler.isBootDiagnosticsEnabled();
}

// (body continues on the next chunk line)
@Override
public String bootDiagnosticsStorageUri() {
    // (tail of bootDiagnosticsStorageUri() begun above)
    return this.bootDiagnosticsHandler.bootDiagnosticsStorageUri();
}

/** True when any managed service identity (system- or user-assigned) is enabled. */
@Override
public boolean isManagedServiceIdentityEnabled() {
    ResourceIdentityType type = this.managedServiceIdentityType();
    return type != null && !type.equals(ResourceIdentityType.NONE);
}

@Override
public String systemAssignedManagedServiceIdentityTenantId() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().tenantId();
    }
    return null;
}

@Override
public String systemAssignedManagedServiceIdentityPrincipalId() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().principalId();
    }
    return null;
}

@Override
public ResourceIdentityType managedServiceIdentityType() {
    if (this.innerModel().identity() != null) {
        return this.innerModel().identity().type();
    }
    return null;
}

/** Ids of attached user-assigned identities; empty set when none. */
@Override
public Set<String> userAssignedManagedServiceIdentityIds() {
    if (this.innerModel().identity() != null && this.innerModel().identity().userAssignedIdentities() != null) {
        return Collections
            .unmodifiableSet(new HashSet<String>(this.innerModel().identity().userAssignedIdentities().keySet()));
    }
    return Collections.unmodifiableSet(new HashSet<String>());
}

@Override
public BillingProfile billingProfile() {
    return this.innerModel().billingProfile();
}

@Override
public boolean isHibernationEnabled() {
    return this.innerModel().additionalCapabilities() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().additionalCapabilities().hibernationEnabled());
}

/** Security type from the security profile, or null when no profile is set. */
@Override
public SecurityTypes securityType() {
    SecurityProfile securityProfile = this.innerModel().securityProfile();
    if (securityProfile == null) {
        return null;
    }
    return securityProfile.securityType();
}

/** True when a security type is set and UEFI secure boot is enabled. */
@Override
public boolean isSecureBootEnabled() {
    return securityType() != null
        && this.innerModel().securityProfile().uefiSettings() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().secureBootEnabled());
}

// (body continues on the next chunk line)
@Override
public boolean isVTpmEnabled() {
    return
    // (tail of isVTpmEnabled() begun above)
    securityType() != null
        && this.innerModel().securityProfile().uefiSettings() != null
        && ResourceManagerUtils.toPrimitiveBoolean(this.innerModel().securityProfile().uefiSettings().vTpmEnabled());
}

@Override
public OffsetDateTime timeCreated() {
    return innerModel().timeCreated();
}

/** Delete option recorded for the primary NIC. */
@Override
public DeleteOptions primaryNetworkInterfaceDeleteOptions() {
    String nicId = primaryNetworkInterfaceId();
    return networkInterfaceDeleteOptions(nicId);
}

/** Delete option for the NIC with the given id (case-insensitive match), or null. */
@Override
public DeleteOptions networkInterfaceDeleteOptions(String networkInterfaceId) {
    if (CoreUtils.isNullOrEmpty(networkInterfaceId)
        || this.innerModel().networkProfile() == null
        || this.innerModel().networkProfile().networkInterfaces() == null) {
        return null;
    }
    return this.innerModel().networkProfile()
        .networkInterfaces()
        .stream()
        .filter(nic -> networkInterfaceId.equalsIgnoreCase(nic.id()))
        .findAny()
        .map(NetworkInterfaceReference::deleteOption)
        .orElse(null);
}

@Override
public VirtualMachinePriorityTypes priority() {
    return this.innerModel().priority();
}

@Override
public VirtualMachineEvictionPolicyTypes evictionPolicy() {
    return this.innerModel().evictionPolicy();
}

@Override
public String userData() {
    return this.innerModel().userData();
}

/**
 * Pre-create hook: when no storage account was supplied but unmanaged OS/data disks
 * need one, registers an implicit storage account creatable with a random "stg" name.
 * (Body continues on the next chunk line.)
 */
@Override
public void beforeGroupCreateOrUpdate() {
    if (creatableStorageAccountKey == null && existingStorageAccountToAssociate == null) {
        if (osDiskRequiresImplicitStorageAccountCreation() || dataDisksRequiresImplicitStorageAccountCreation()) {
            Creatable<StorageAccount> storageAccountCreatable = null;
            if (this.creatableGroup != null) {
                storageAccountCreatable =
                    this
                        .storageManager
                        .storageAccounts()
                        .define(this.namer.getRandomName("stg", 24).replace("-", ""))
                        .withRegion(this.regionName())
                        .withNewResourceGroup(this.creatableGroup);
            } else {
                storageAccountCreatable =
                    this
                        .storageManager
                        .storageAccounts()
                        .define(this.namer.getRandomName("stg", 24).replace("-", ""))
                        .withRegion(this.regionName())
                        .withExistingResourceGroup(this.resourceGroupName());
            }
            // (tail of beforeGroupCreateOrUpdate begun above)
            this.creatableStorageAccountKey = this.addDependency(storageAccountCreatable);
        }
    }
    this.bootDiagnosticsHandler.prepare();
}

/** Applies defaults, then issues the createOrUpdate call and resets local caches from the response. */
@Override
public Mono<VirtualMachine> createResourceAsync() {
    return prepareCreateResourceAsync()
        .flatMap(
            virtualMachine ->
                this
                    .manager()
                    .serviceClient()
                    .getVirtualMachines()
                    .createOrUpdateAsync(resourceGroupName(), vmName, innerModel())
                    .map(
                        virtualMachineInner -> {
                            reset(virtualMachineInner);
                            return this;
                        }));
}

/**
 * Fills in OS disk / OS profile / hardware defaults, prepares disks, diagnostics,
 * network and (after PPG creation) availability and identity settings.
 */
private Mono<VirtualMachine> prepareCreateResourceAsync() {
    setOSDiskDefaults();
    setOSProfileDefaults();
    setHardwareProfileDefaults();
    if (isManagedDiskEnabled()) {
        managedDataDisks.setDataDisksDefaults();
    } else {
        UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName);
    }
    this.handleUnManagedOSAndDataDisksStorageSettings();
    this.bootDiagnosticsHandler.handleDiagnosticsSettings();
    this.handleNetworkSettings();
    return this
        .createNewProximityPlacementGroupAsync()
        .map(
            virtualMachine -> {
                this.handleAvailabilitySettings();
                this.virtualMachineMsiHandler.processCreatedExternalIdentities();
                this.virtualMachineMsiHandler.handleExternalIdentities();
                return virtualMachine;
            });
}

/**
 * Builds an update payload from the current state and PATCHes the VM — but only when
 * something actually changed; otherwise completes with this instance unchanged.
 * (Body continues on the next chunk line.)
 */
@Override
public Mono<VirtualMachine> updateResourceAsync() {
    if (isManagedDiskEnabled()) {
        managedDataDisks.setDataDisksDefaults();
    } else {
        UnmanagedDataDiskImpl.setDataDisksDefaults(this.unmanagedDataDisks, this.vmName);
    }
    this.handleUnManagedOSAndDataDisksStorageSettings();
    this.bootDiagnosticsHandler.handleDiagnosticsSettings();
    this.handleNetworkSettings();
    this.handleAvailabilitySettings();
    this.virtualMachineMsiHandler.processCreatedExternalIdentities();
    VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner();
    this.copyInnerToUpdateParameter(updateParameter);
    this.virtualMachineMsiHandler.handleExternalIdentities(updateParameter);
    final boolean vmModified = this.isVirtualMachineModifiedDuringUpdate(updateParameter);
    if (vmModified) {
        return this
            .manager()
            .serviceClient()
            .getVirtualMachines()
.updateAsync(resourceGroupName(), vmName, updateParameter) .map( virtualMachineInner -> { reset(virtualMachineInner); return this; }); } else { return Mono.just(this); } } @Override public Mono<Void> afterPostRunAsync(boolean isGroupFaulted) { this.virtualMachineExtensions.clear(); if (isGroupFaulted) { return Mono.empty(); } else { return this.refreshAsync().then(); } } VirtualMachineImpl withExtension(VirtualMachineExtensionImpl extension) { this.virtualMachineExtensions.addExtension(extension); return this; } private void reset(VirtualMachineInner inner) { this.setInner(inner); clearCachedRelatedResources(); initializeDataDisks(); virtualMachineMsiHandler.clear(); creatableSecondaryNetworkInterfaceKeys.clear(); existingSecondaryNetworkInterfacesToAssociate.clear(); secondaryNetworkInterfaceDeleteOptions.clear(); primaryNetworkInterfaceDeleteOptions = null; } VirtualMachineImpl withUnmanagedDataDisk(UnmanagedDataDiskImpl dataDisk) { this.innerModel().storageProfile().dataDisks().add(dataDisk.innerModel()); this.unmanagedDataDisks.add(dataDisk); return this; } @Override public VirtualMachineImpl withAvailabilityZone(AvailabilityZoneId zoneId) { if (isInCreateMode()) { if (this.innerModel().zones() == null) { this.innerModel().withZones(new ArrayList<String>()); } this.innerModel().zones().add(zoneId.toString()); if (this.implicitPipCreatable != null) { this.implicitPipCreatable .withAvailabilityZone(zoneId) .withSku(PublicIPSkuType.STANDARD) .withStaticIP(); } } return this; } @Override public VirtualMachineImpl withOsDiskDeleteOptions(DeleteOptions deleteOptions) { if (deleteOptions == null || this.innerModel().storageProfile() == null || this.innerModel().storageProfile().osDisk() == null) { return null; } this.innerModel().storageProfile().osDisk().withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions)); return this; } @Override public VirtualMachineImpl withPrimaryNetworkInterfaceDeleteOptions(DeleteOptions deleteOptions) { 
    // (tail of withPrimaryNetworkInterfaceDeleteOptions begun above)
    this.primaryNetworkInterfaceDeleteOptions = deleteOptions;
    return this;
}

/** Applies a delete option to every attached NIC whose id matches one of {@code nicIds}. */
@Override
public VirtualMachineImpl withNetworkInterfacesDeleteOptions(DeleteOptions deleteOptions, String... nicIds) {
    if (nicIds == null || nicIds.length == 0) {
        throw new IllegalArgumentException("No nicIds specified for `withNetworkInterfacesDeleteOptions`");
    }
    if (this.innerModel().networkProfile() != null
        && this.innerModel().networkProfile().networkInterfaces() != null) {
        // Case-insensitive id matching via lower-cased set.
        Set<String> nicIdSet =
            Arrays.stream(nicIds).map(nicId -> nicId.toLowerCase(Locale.ROOT)).collect(Collectors.toSet());
        this.innerModel().networkProfile().networkInterfaces().forEach(
            nic -> {
                if (nicIdSet.contains(nic.id().toLowerCase(Locale.ROOT))) {
                    nic.withDeleteOption(deleteOptions);
                }
            }
        );
    }
    return this;
}

/** Applies a delete option to every data disk whose LUN is in {@code luns}. */
@Override
public VirtualMachineImpl withDataDisksDeleteOptions(DeleteOptions deleteOptions, Integer... luns) {
    if (luns == null || luns.length == 0) {
        throw new IllegalArgumentException("No luns specified for `withDataDisksDeleteOptions`");
    }
    Set<Integer> lunSet = Arrays.stream(luns).filter(Objects::nonNull).collect(Collectors.toSet());
    if (lunSet.isEmpty()) {
        throw new IllegalArgumentException("No non-null luns specified for `withDataDisksDeleteOptions`");
    }
    if (this.innerModel().storageProfile() != null && this.innerModel().storageProfile().dataDisks() != null) {
        this.innerModel().storageProfile().dataDisks().forEach(
            dataDisk -> {
                if (lunSet.contains(dataDisk.lun())) {
                    dataDisk.withDeleteOption(diskDeleteOptionsFromDeleteOptions(deleteOptions));
                }
            }
        );
    }
    return this;
}

AzureEnvironment environment() {
    return manager().environment();
}

/**
 * Create-mode defaulting for the OS disk: managed image-based disks get a storage
 * account type and lose any VHD; unmanaged image-based disks get a generated VHD
 * location and name. (Body continues on the next chunk line.)
 */
private void setOSDiskDefaults() {
    if (isInUpdateMode()) {
        return;
    }
    StorageProfile storageProfile = this.innerModel().storageProfile();
    OSDisk osDisk = storageProfile.osDisk();
    if (isOSDiskFromImage(osDisk)) {
        if (isManagedDiskEnabled()) {
            if (osDisk.managedDisk() == null) {
                osDisk.withManagedDisk(new ManagedDiskParameters());
            }
            if (osDisk.managedDisk().storageAccountType() ==
                // (tail of setOSDiskDefaults begun above — default storage account type)
                null) {
                osDisk.managedDisk().withStorageAccountType(StorageAccountTypes.STANDARD_LRS);
            }
            osDisk.withVhd(null);
        } else {
            if (isOSDiskFromPlatformImage(storageProfile) || isOSDiskFromStoredImage(storageProfile)) {
                if (osDisk.vhd() == null) {
                    // Generate a unique VHD location for the unmanaged OS disk.
                    String osDiskVhdContainerName = "vhds";
                    String osDiskVhdName = this.vmName + "-os-disk-" + UUID.randomUUID().toString() + ".vhd";
                    withOSDiskVhdLocation(osDiskVhdContainerName, osDiskVhdName);
                }
                osDisk.withManagedDisk(null);
            }
            if (osDisk.name() == null) {
                withOSDiskName(this.vmName + "-os-disk");
            }
        }
    } else {
        // Attached (specialized) OS disk: strip the settings that do not apply.
        if (isManagedDiskEnabled()) {
            if (osDisk.managedDisk() != null) {
                osDisk.managedDisk().withStorageAccountType(null);
            }
            osDisk.withVhd(null);
        } else {
            osDisk.withManagedDisk(null);
            if (osDisk.name() == null) {
                withOSDiskName(this.vmName + "-os-disk");
            }
        }
    }
    if (osDisk.caching() == null) {
        withOSDiskCaching(CachingTypes.READ_WRITE);
    }
}

/**
 * Create-mode defaulting for the OS profile: configures Linux password auth and a
 * computer name (generated when the VM name is numeric or too long), or removes the
 * OS profile entirely for specialized disks.
 */
private void setOSProfileDefaults() {
    if (isInUpdateMode()) {
        return;
    }
    StorageProfile storageProfile = this.innerModel().storageProfile();
    OSDisk osDisk = storageProfile.osDisk();
    if (!removeOsProfile && isOSDiskFromImage(osDisk)) {
        if (osDisk.osType() == OperatingSystemTypes.LINUX || this.isMarketplaceLinuxImage) {
            OSProfile osProfile = this.innerModel().osProfile();
            if (osProfile.linuxConfiguration() == null) {
                osProfile.withLinuxConfiguration(new LinuxConfiguration());
            }
            // Password auth is disabled exactly when no admin password was supplied.
            this
                .innerModel()
                .osProfile()
                .linuxConfiguration()
                .withDisablePasswordAuthentication(osProfile.adminPassword() == null);
        }
        if (this.innerModel().osProfile().computerName() == null) {
            // Computer names cannot be purely numeric and are limited to 15 characters.
            if (vmName.matches("[0-9]+")) {
                this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15));
            } else if (vmName.length() <= 15) {
                this.innerModel().osProfile().withComputerName(vmName);
            } else {
                this.innerModel().osProfile().withComputerName(namer.getRandomName("vm", 15));
            }
        }
    } else {
        // Specialized (attached) OS disk: OS profile is not allowed.
        this.innerModel().withOsProfile(null);
    }
}

// (setHardwareProfileDefaults continues on the next chunk line)
private void setHardwareProfileDefaults() {
    if (!isInCreateMode()) {
        return;
    }
    HardwareProfile
    // (tail of setHardwareProfileDefaults begun above — default VM size)
    hardwareProfile = this.innerModel().hardwareProfile();
    if (hardwareProfile.vmSize() == null) {
        hardwareProfile.withVmSize(VirtualMachineSizeTypes.BASIC_A0);
    }
}

/** Prepare virtual machine disks profile (StorageProfile). */
private void handleUnManagedOSAndDataDisksStorageSettings() {
    if (isManagedDiskEnabled()) {
        return;
    }
    // Resolve the storage account from the created dependency or the explicit one.
    StorageAccount storageAccount = null;
    if (this.creatableStorageAccountKey != null) {
        storageAccount = this.taskResult(this.creatableStorageAccountKey);
    } else if (this.existingStorageAccountToAssociate != null) {
        storageAccount = this.existingStorageAccountToAssociate;
    }
    if (isInCreateMode()) {
        if (storageAccount != null) {
            if (isOSDiskFromPlatformImage(innerModel().storageProfile())) {
                // Substitute the placeholder in the generated OS VHD URI with the real blob endpoint.
                String uri =
                    innerModel()
                        .storageProfile()
                        .osDisk()
                        .vhd()
                        .uri()
                        .replaceFirst("\\{storage-base-url}", storageAccount.endPoints().primary().blob());
                innerModel().storageProfile().osDisk().vhd().withUri(uri);
            }
            UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName);
        }
    } else {
        if (storageAccount != null) {
            UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, storageAccount, vmName);
        } else {
            UnmanagedDataDiskImpl.ensureDisksVhdUri(unmanagedDataDisks, vmName);
        }
    }
}

/**
 * In create mode, creates the requested proximity placement group (if any) and wires
 * its id into the inner model; otherwise completes immediately with this instance.
 */
private Mono<VirtualMachineImpl> createNewProximityPlacementGroupAsync() {
    if (isInCreateMode()) {
        if (this.newProximityPlacementGroupName != null && !this.newProximityPlacementGroupName.isEmpty()) {
            ProximityPlacementGroupInner plgInner = new ProximityPlacementGroupInner();
            plgInner.withProximityPlacementGroupType(this.newProximityPlacementGroupType);
            plgInner.withLocation(this.innerModel().location());
            return this
                .manager()
                .serviceClient()
                .getProximityPlacementGroups()
                .createOrUpdateAsync(this.resourceGroupName(), this.newProximityPlacementGroupName, plgInner)
                .map(
                    createdPlgInner -> {
                        this
                            .innerModel()
                            .withProximityPlacementGroup(new SubResource().withId(createdPlgInner.id()));
                        return this;
                    });
        }
    }
    return Mono.just(this);
}

// (handleNetworkSettings continues on the next chunk line)
private void
/**
 * Wires NIC references into the network profile: the primary NIC (create mode),
 * its delete option, then creatable and pre-existing secondary NICs.
 * (Body continues on the next chunk line.)
 */
handleNetworkSettings() {
    if (isInCreateMode()) {
        // Resolve the primary NIC from the created dependency or the explicit one.
        NetworkInterface primaryNetworkInterface = null;
        if (this.creatablePrimaryNetworkInterfaceKey != null) {
            primaryNetworkInterface = this.taskResult(this.creatablePrimaryNetworkInterfaceKey);
        } else if (this.existingPrimaryNetworkInterfaceToAssociate != null) {
            primaryNetworkInterface = this.existingPrimaryNetworkInterfaceToAssociate;
        }
        if (primaryNetworkInterface != null) {
            NetworkInterfaceReference nicReference = new NetworkInterfaceReference();
            nicReference.withPrimary(true);
            nicReference.withId(primaryNetworkInterface.id());
            this.innerModel().networkProfile().networkInterfaces().add(nicReference);
        }
    }
    // Apply any pending delete option to the (now resolvable) primary NIC.
    if (this.primaryNetworkInterfaceDeleteOptions != null) {
        String primaryNetworkInterfaceId = primaryNetworkInterfaceId();
        if (primaryNetworkInterfaceId != null) {
            this.innerModel().networkProfile().networkInterfaces().stream()
                .filter(nic -> primaryNetworkInterfaceId.equals(nic.id()))
                .forEach(nic -> nic.withDeleteOption(this.primaryNetworkInterfaceDeleteOptions));
        }
    }
    // Secondary NICs created as dependencies, with their recorded delete options.
    for (String creatableSecondaryNetworkInterfaceKey : this.creatableSecondaryNetworkInterfaceKeys) {
        NetworkInterface secondaryNetworkInterface = this.taskResult(creatableSecondaryNetworkInterfaceKey);
        NetworkInterfaceReference nicReference = new NetworkInterfaceReference();
        nicReference.withPrimary(false);
        nicReference.withId(secondaryNetworkInterface.id());
        if (secondaryNetworkInterfaceDeleteOptions.containsKey(creatableSecondaryNetworkInterfaceKey)) {
            DeleteOptions deleteOptions =
                secondaryNetworkInterfaceDeleteOptions.get(creatableSecondaryNetworkInterfaceKey);
            nicReference.withDeleteOption(deleteOptions);
        }
        this.innerModel().networkProfile().networkInterfaces().add(nicReference);
    }
    // Pre-existing secondary NICs supplied by the caller.
    for (NetworkInterface secondaryNetworkInterface : this.existingSecondaryNetworkInterfacesToAssociate) {
        NetworkInterfaceReference nicReference = new NetworkInterfaceReference();
        nicReference.withPrimary(false);
        nicReference.withId(secondaryNetworkInterface.id());
this.innerModel().networkProfile().networkInterfaces().add(nicReference); } } private void handleAvailabilitySettings() { if (!isInCreateMode()) { return; } AvailabilitySet availabilitySet = null; if (this.creatableAvailabilitySetKey != null) { availabilitySet = this.taskResult(this.creatableAvailabilitySetKey); } else if (this.existingAvailabilitySetToAssociate != null) { availabilitySet = this.existingAvailabilitySetToAssociate; } if (availabilitySet != null) { if (this.innerModel().availabilitySet() == null) { this.innerModel().withAvailabilitySet(new SubResource()); } this.innerModel().availabilitySet().withId(availabilitySet.id()); } } private boolean osDiskRequiresImplicitStorageAccountCreation() { if (isManagedDiskEnabled()) { return false; } if (this.creatableStorageAccountKey != null || this.existingStorageAccountToAssociate != null || !isInCreateMode()) { return false; } return isOSDiskFromPlatformImage(this.innerModel().storageProfile()); } private boolean dataDisksRequiresImplicitStorageAccountCreation() { if (isManagedDiskEnabled()) { return false; } if (this.creatableStorageAccountKey != null || this.existingStorageAccountToAssociate != null || this.unmanagedDataDisks.size() == 0) { return false; } boolean hasEmptyVhd = false; for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.creationMethod() == DiskCreateOptionTypes.EMPTY || dataDisk.creationMethod() == DiskCreateOptionTypes.FROM_IMAGE) { if (dataDisk.innerModel().vhd() == null) { hasEmptyVhd = true; break; } } } if (isInCreateMode()) { return hasEmptyVhd; } if (hasEmptyVhd) { for (VirtualMachineUnmanagedDataDisk dataDisk : this.unmanagedDataDisks) { if (dataDisk.creationMethod() == DiskCreateOptionTypes.ATTACH && dataDisk.innerModel().vhd() != null) { return false; } } return true; } return false; } /** * Checks whether the OS disk is directly attached to a unmanaged VHD. 
* * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a unmanaged VHD, false otherwise */ private boolean isOSDiskAttachedUnmanaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.vhd() != null && osDisk.vhd().uri() != null; } /** * Checks whether the OS disk is directly attached to a managed disk. * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is attached to a managed disk, false otherwise */ private boolean isOSDiskAttachedManaged(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.ATTACH && osDisk.managedDisk() != null && osDisk.managedDisk().id() != null; } /** * Checks whether the OS disk is based on an image (image from PIR or custom image [captured, bringYourOwnFeature]). * * @param osDisk the osDisk value in the storage profile * @return true if the OS disk is configured to use image from PIR or custom image */ private boolean isOSDiskFromImage(OSDisk osDisk) { return osDisk.createOption() == DiskCreateOptionTypes.FROM_IMAGE; } /** * Checks whether the OS disk is based on an platform image (image in PIR). * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on platform image. */ private boolean isOSDiskFromPlatformImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.publisher() != null && imageReference.offer() != null && imageReference.sku() != null && imageReference.version() != null; } /** * Checks whether the OS disk is based on a CustomImage. * * <p>A custom image is represented by {@link VirtualMachineCustomImage}. * * @param storageProfile the storage profile * @return true if the OS disk is configured to be based on custom image. 
*/ private boolean isOsDiskFromCustomImage(StorageProfile storageProfile) { ImageReference imageReference = storageProfile.imageReference(); return isOSDiskFromImage(storageProfile.osDisk()) && imageReference != null && imageReference.id() != null; } /** * Checks whether the OS disk is based on a stored image ('captured' or 'bring your own feature'). * * <p>A stored image is created by calling {@link VirtualMachine * * @param storageProfile the storage profile * @return true if the OS disk is configured to use custom image ('captured' or 'bring your own feature') */ private boolean isOSDiskFromStoredImage(StorageProfile storageProfile) { OSDisk osDisk = storageProfile.osDisk(); return isOSDiskFromImage(osDisk) && osDisk.image() != null && osDisk.image().uri() != null; } private String temporaryBlobUrl(String containerName, String blobName) { return "{storage-base-url}" + containerName + "/" + blobName; } private NetworkInterface.DefinitionStages.WithPrimaryPublicIPAddress prepareNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionWithNetwork; if (this.creatableGroup != null) { definitionWithNetwork = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionWithNetwork = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionWithNetwork.withNewPrimaryNetwork("vnet" + name).withPrimaryPrivateIPAddressDynamic(); } private void initializeDataDisks() { if (this.innerModel().storageProfile().dataDisks() == null) { this.innerModel().storageProfile().withDataDisks(new ArrayList<>()); } this.isUnmanagedDiskSelected = false; this.managedDataDisks.clear(); this.unmanagedDataDisks = new ArrayList<>(); if (!isManagedDiskEnabled()) { for (DataDisk dataDiskInner : this.storageProfile().dataDisks()) { 
this.unmanagedDataDisks.add(new UnmanagedDataDiskImpl(dataDiskInner, this)); } } } private NetworkInterface.DefinitionStages.WithPrimaryNetwork preparePrimaryNetworkInterface(String name) { NetworkInterface.DefinitionStages.WithGroup definitionWithGroup = this.networkManager.networkInterfaces().define(name).withRegion(this.regionName()); NetworkInterface.DefinitionStages.WithPrimaryNetwork definitionAfterGroup; if (this.creatableGroup != null) { definitionAfterGroup = definitionWithGroup.withNewResourceGroup(this.creatableGroup); } else { definitionAfterGroup = definitionWithGroup.withExistingResourceGroup(this.resourceGroupName()); } return definitionAfterGroup; } private void clearCachedRelatedResources() { this.virtualMachineInstanceView = null; } private void throwIfManagedDiskEnabled(String message) { if (this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private void throwIfManagedDiskDisabled(String message) { if (!this.isManagedDiskEnabled()) { throw logger.logExceptionAsError(new UnsupportedOperationException(message)); } } private boolean isInUpdateMode() { return !this.isInCreateMode(); } private DiskDeleteOptionTypes diskDeleteOptionsFromDeleteOptions(DeleteOptions deleteOptions) { return deleteOptions == null ? 
null : DiskDeleteOptionTypes.fromString(deleteOptions.toString()); } boolean isVirtualMachineModifiedDuringUpdate(VirtualMachineUpdateInner updateParameter) { if (updateParameterSnapshotOnUpdate == null || updateParameter == null) { return true; } else { try { String jsonStrSnapshot = SERIALIZER_ADAPTER.serialize(updateParameterSnapshotOnUpdate, SerializerEncoding.JSON); String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON); return !jsonStr.equals(jsonStrSnapshot); } catch (IOException e) { return true; } } } VirtualMachineUpdateInner deepCopyInnerToUpdateParameter() { VirtualMachineUpdateInner updateParameter = new VirtualMachineUpdateInner(); copyInnerToUpdateParameter(updateParameter); try { String jsonStr = SERIALIZER_ADAPTER.serialize(updateParameter, SerializerEncoding.JSON); updateParameter = SERIALIZER_ADAPTER.deserialize(jsonStr, VirtualMachineUpdateInner.class, SerializerEncoding.JSON); } catch (IOException e) { return null; } if (this.innerModel().identity() != null) { VirtualMachineIdentity identity = new VirtualMachineIdentity(); identity.withType(this.innerModel().identity().type()); updateParameter.withIdentity(identity); } return updateParameter; } private void copyInnerToUpdateParameter(VirtualMachineUpdateInner updateParameter) { updateParameter.withHardwareProfile(this.innerModel().hardwareProfile()); updateParameter.withStorageProfile(this.innerModel().storageProfile()); updateParameter.withOsProfile(this.innerModel().osProfile()); updateParameter.withNetworkProfile(this.innerModel().networkProfile()); updateParameter.withDiagnosticsProfile(this.innerModel().diagnosticsProfile()); updateParameter.withBillingProfile(this.innerModel().billingProfile()); updateParameter.withSecurityProfile(this.innerModel().securityProfile()); updateParameter.withAdditionalCapabilities(this.innerModel().additionalCapabilities()); updateParameter.withAvailabilitySet(this.innerModel().availabilitySet()); 
updateParameter.withLicenseType(this.innerModel().licenseType()); updateParameter.withZones(this.innerModel().zones()); updateParameter.withTags(this.innerModel().tags()); updateParameter.withProximityPlacementGroup(this.innerModel().proximityPlacementGroup()); updateParameter.withPriority(this.innerModel().priority()); updateParameter.withEvictionPolicy(this.innerModel().evictionPolicy()); updateParameter.withUserData(this.innerModel().userData()); } RoleAssignmentHelper.IdProvider idProvider() { return new RoleAssignmentHelper.IdProvider() { @Override public String principalId() { if (innerModel() != null && innerModel().identity() != null) { return innerModel().identity().principalId(); } else { return null; } } @Override public String resourceId() { if (innerModel() != null) { return innerModel().id(); } else { return null; } } }; } @Override public VirtualMachineImpl withPlacement(DiffDiskPlacement placement) { if (placement != null) { this.innerModel().storageProfile().osDisk().diffDiskSettings().withPlacement(placement); } return this; } @Override public VirtualMachineImpl withExistingVirtualMachineScaleSet(VirtualMachineScaleSet scaleSet) { if (scaleSet != null) { this.innerModel().withVirtualMachineScaleSet(new SubResource().withId(scaleSet.id())); } return this; } @Override public VirtualMachineImpl withOSDisk(String diskId) { if (diskId == null) { return this; } if (!isManagedDiskEnabled() || this.innerModel().storageProfile().osDisk().managedDisk() == null) { return this; } OSDisk osDisk = new OSDisk() .withCreateOption(this.innerModel().storageProfile().osDisk().createOption()); osDisk.withManagedDisk(new ManagedDiskParameters().withId(diskId)); this.storageProfile().withOsDisk(osDisk); this.storageProfile().osDisk().managedDisk().withId(diskId); return this; } @Override public VirtualMachineImpl withOSDisk(Disk disk) { if (disk == null) { return this; } return withOSDisk(disk.id()); } @Override public VirtualMachineImpl withTrustedLaunch() { 
ensureSecurityProfile().withSecurityType(SecurityTypes.TRUSTED_LAUNCH); return this; } @Override public VirtualMachineImpl withSecureBoot() { if (securityType() == null) { return this; } ensureUefiSettings().withSecureBootEnabled(true); return this; } @Override public VirtualMachineImpl withoutSecureBoot() { if (securityType() == null) { return this; } ensureUefiSettings().withSecureBootEnabled(false); return this; } @Override public VirtualMachineImpl withVTpm() { if (securityType() == null) { return this; } ensureUefiSettings().withVTpmEnabled(true); return this; } @Override public VirtualMachineImpl withoutVTpm() { if (securityType() == null) { return this; } ensureUefiSettings().withVTpmEnabled(false); return this; } private SecurityProfile ensureSecurityProfile() { SecurityProfile securityProfile = this.innerModel().securityProfile(); if (securityProfile == null) { securityProfile = new SecurityProfile(); this.innerModel().withSecurityProfile(securityProfile); } return securityProfile; } private UefiSettings ensureUefiSettings() { UefiSettings uefiSettings = ensureSecurityProfile().uefiSettings(); if (uefiSettings == null) { uefiSettings = new UefiSettings(); ensureSecurityProfile().withUefiSettings(uefiSettings); } return uefiSettings; } /** Class to manage Data disk collection. 
*/ private class ManagedDataDiskCollection { private final Map<String, DataDisk> newDisksToAttach = new HashMap<>(); private final List<DataDisk> existingDisksToAttach = new ArrayList<>(); private final List<DataDisk> implicitDisksToAssociate = new ArrayList<>(); private final List<Integer> diskLunsToRemove = new ArrayList<>(); private final List<DataDisk> newDisksFromImage = new ArrayList<>(); private final VirtualMachineImpl vm; private CachingTypes defaultCachingType; private StorageAccountTypes defaultStorageAccountType; private DiskDeleteOptionTypes defaultDeleteOptions; private DiskEncryptionSetParameters defaultDiskEncryptionSet; ManagedDataDiskCollection(VirtualMachineImpl vm) { this.vm = vm; } void setDefaultCachingType(CachingTypes cachingType) { this.defaultCachingType = cachingType; } void setDefaultDeleteOptions(DiskDeleteOptionTypes deleteOptions) { this.defaultDeleteOptions = deleteOptions; } void setDefaultStorageAccountType(StorageAccountTypes defaultStorageAccountType) { this.defaultStorageAccountType = defaultStorageAccountType; } void setDefaultEncryptionSet(String diskEncryptionSetId) { this.defaultDiskEncryptionSet = new DiskEncryptionSetParameters().withId(diskEncryptionSetId); } void setDataDisksDefaults() { VirtualMachineInner vmInner = this.vm.innerModel(); if (isPending()) { if (vmInner.storageProfile().dataDisks() == null) { vmInner.storageProfile().withDataDisks(new ArrayList<>()); } List<DataDisk> dataDisks = vmInner.storageProfile().dataDisks(); final List<Integer> usedLuns = new ArrayList<>(); for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksToAttach.values()) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.existingDisksToAttach) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.implicitDisksToAssociate) { if (dataDisk.lun() != -1) { 
usedLuns.add(dataDisk.lun()); } } for (DataDisk dataDisk : this.newDisksFromImage) { if (dataDisk.lun() != -1) { usedLuns.add(dataDisk.lun()); } } Callable<Integer> nextLun = () -> { Integer lun = 0; while (usedLuns.contains(lun)) { lun++; } usedLuns.add(lun); return lun; }; try { setAttachableNewDataDisks(nextLun); setAttachableExistingDataDisks(nextLun); setImplicitDataDisks(nextLun); } catch (Exception ex) { throw logger.logExceptionAsError(Exceptions.propagate(ex)); } setImageBasedDataDisks(); removeDataDisks(); } if (vmInner.storageProfile().dataDisks() != null && vmInner.storageProfile().dataDisks().size() == 0) { if (vm.isInCreateMode()) { vmInner.storageProfile().withDataDisks(null); } } this.clear(); } private void clear() { newDisksToAttach.clear(); existingDisksToAttach.clear(); implicitDisksToAssociate.clear(); diskLunsToRemove.clear(); newDisksFromImage.clear(); defaultCachingType = null; defaultStorageAccountType = null; defaultDeleteOptions = null; defaultDiskEncryptionSet = null; } private boolean isPending() { return newDisksToAttach.size() > 0 || existingDisksToAttach.size() > 0 || implicitDisksToAssociate.size() > 0 || diskLunsToRemove.size() > 0 || newDisksFromImage.size() > 0; } private void setDefaultDiskEncryptionSetOptions(DataDisk dataDisk) { if (getDefaultDiskEncryptionSetOptions() != null) { if (dataDisk.managedDisk() != null && dataDisk.managedDisk().diskEncryptionSet() != null) { if (dataDisk.managedDisk().diskEncryptionSet().id() == null) { dataDisk.managedDisk().withDiskEncryptionSet(null); } } else { if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } dataDisk.managedDisk().withDiskEncryptionSet(getDefaultDiskEncryptionSetOptions()); } } } private void setAttachableNewDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Map.Entry<String, DataDisk> entry : this.newDisksToAttach.entrySet()) { Disk managedDisk = 
vm.taskResult(entry.getKey()); DataDisk dataDisk = entry.getValue(); dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } dataDisk.withManagedDisk(new ManagedDiskParameters()); dataDisk.managedDisk().withId(managedDisk.id()); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setAttachableExistingDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.existingDisksToAttach) { dataDisk.withCreateOption(DiskCreateOptionTypes.ATTACH); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImplicitDataDisks(Callable<Integer> nextLun) throws Exception { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.implicitDisksToAssociate) { dataDisk.withCreateOption(DiskCreateOptionTypes.EMPTY); if (dataDisk.lun() == -1) { dataDisk.withLun(nextLun.call()); } if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.managedDisk() == null) { dataDisk.withManagedDisk(new ManagedDiskParameters()); } if (dataDisk.managedDisk().storageAccountType() == null) { dataDisk.managedDisk().withStorageAccountType(getDefaultStorageAccountType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); 
dataDisk.withName(null); dataDisks.add(dataDisk); } } private void setImageBasedDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (DataDisk dataDisk : this.newDisksFromImage) { dataDisk.withCreateOption(DiskCreateOptionTypes.FROM_IMAGE); if (dataDisk.caching() == null) { dataDisk.withCaching(getDefaultCachingType()); } if (dataDisk.deleteOption() == null) { dataDisk.withDeleteOption(getDefaultDeleteOptions()); } setDefaultDiskEncryptionSetOptions(dataDisk); dataDisk.withName(null); dataDisks.add(dataDisk); } } private void removeDataDisks() { List<DataDisk> dataDisks = vm.innerModel().storageProfile().dataDisks(); for (Integer lun : this.diskLunsToRemove) { int indexToRemove = 0; for (DataDisk dataDisk : dataDisks) { if (dataDisk.lun() == lun) { dataDisks.remove(indexToRemove); break; } indexToRemove++; } } } private CachingTypes getDefaultCachingType() { if (defaultCachingType == null) { return CachingTypes.READ_WRITE; } return defaultCachingType; } private StorageAccountTypes getDefaultStorageAccountType() { if (defaultStorageAccountType == null) { return StorageAccountTypes.STANDARD_LRS; } return defaultStorageAccountType; } private DiskDeleteOptionTypes getDefaultDeleteOptions() { return defaultDeleteOptions; } private DiskEncryptionSetParameters getDefaultDiskEncryptionSetOptions() { return defaultDiskEncryptionSet; } } /** Class to manage VM boot diagnostics settings. 
*/ private class BootDiagnosticsHandler { private final VirtualMachineImpl vmImpl; private String creatableDiagnosticsStorageAccountKey; private boolean useManagedStorageAccount = false; BootDiagnosticsHandler(VirtualMachineImpl vmImpl) { this.vmImpl = vmImpl; if (isBootDiagnosticsEnabled() && this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri() == null) { this.useManagedStorageAccount = true; } } public boolean isBootDiagnosticsEnabled() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null && this.vmInner().diagnosticsProfile().bootDiagnostics().enabled() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().enabled(); } return false; } public String bootDiagnosticsStorageUri() { if (this.vmInner().diagnosticsProfile() != null && this.vmInner().diagnosticsProfile().bootDiagnostics() != null) { return this.vmInner().diagnosticsProfile().bootDiagnostics().storageUri(); } return null; } BootDiagnosticsHandler withBootDiagnostics(boolean useManagedStorageAccount) { this.enableDisable(true); this.useManagedStorageAccount = useManagedStorageAccount; return this; } BootDiagnosticsHandler withBootDiagnostics(Creatable<StorageAccount> creatable) { this.enableDisable(true); this.useManagedStorageAccount = false; this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(creatable); return this; } BootDiagnosticsHandler withBootDiagnostics(String storageAccountBlobEndpointUri) { this.enableDisable(true); this.useManagedStorageAccount = false; this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(storageAccountBlobEndpointUri); return this; } BootDiagnosticsHandler withBootDiagnostics(StorageAccount storageAccount) { return this.withBootDiagnostics(storageAccount.endPoints().primary().blob()); } BootDiagnosticsHandler withoutBootDiagnostics() { this.enableDisable(false); this.useManagedStorageAccount = false; return this; } void prepare() { if 
(useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } if (this.creatableDiagnosticsStorageAccountKey != null || this.vmImpl.creatableStorageAccountKey != null || this.vmImpl.existingStorageAccountToAssociate != null) { return; } String accountName = this.vmImpl.namer.getRandomName("stg", 24).replace("-", ""); Creatable<StorageAccount> storageAccountCreatable; if (this.vmImpl.creatableGroup != null) { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withNewResourceGroup(this.vmImpl.creatableGroup); } else { storageAccountCreatable = this .vmImpl .storageManager .storageAccounts() .define(accountName) .withRegion(this.vmImpl.regionName()) .withExistingResourceGroup(this.vmImpl.resourceGroupName()); } this.creatableDiagnosticsStorageAccountKey = this.vmImpl.addDependency(storageAccountCreatable); } void handleDiagnosticsSettings() { if (useManagedStorageAccount) { return; } DiagnosticsProfile diagnosticsProfile = this.vmInner().diagnosticsProfile(); if (diagnosticsProfile == null || diagnosticsProfile.bootDiagnostics() == null || diagnosticsProfile.bootDiagnostics().storageUri() != null) { return; } boolean enableBD = ResourceManagerUtils.toPrimitiveBoolean(diagnosticsProfile.bootDiagnostics().enabled()); if (!enableBD) { return; } StorageAccount storageAccount = null; if (creatableDiagnosticsStorageAccountKey != null) { storageAccount = this.vmImpl.<StorageAccount>taskResult(creatableDiagnosticsStorageAccountKey); } else if (this.vmImpl.creatableStorageAccountKey != null) { storageAccount = 
this.vmImpl.<StorageAccount>taskResult(this.vmImpl.creatableStorageAccountKey); } else if (this.vmImpl.existingStorageAccountToAssociate != null) { storageAccount = this.vmImpl.existingStorageAccountToAssociate; } if (storageAccount == null) { throw logger .logExceptionAsError( new IllegalStateException( "Unable to retrieve expected storageAccount instance for BootDiagnostics")); } vmInner() .diagnosticsProfile() .bootDiagnostics() .withStorageUri(storageAccount.endPoints().primary().blob()); } private VirtualMachineInner vmInner() { return this.vmImpl.innerModel(); } private void enableDisable(boolean enable) { if (this.vmInner().diagnosticsProfile() == null) { this.vmInner().withDiagnosticsProfile(new DiagnosticsProfile()); } if (this.vmInner().diagnosticsProfile().bootDiagnostics() == null) { this.vmInner().diagnosticsProfile().withBootDiagnostics(new BootDiagnostics()); } if (enable) { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(true); } else { this.vmInner().diagnosticsProfile().bootDiagnostics().withEnabled(false); this.vmInner().diagnosticsProfile().bootDiagnostics().withStorageUri(null); } } } }
// TODO(review): revert this change before merge.
/**
 * Swaps the client's session container for a region-scoped one when region scoping of
 * session tokens is supported for this account/client combination.
 *
 * <p>Whether region scoping is possible is decided by
 * {@code isRegionScopingOfSessionTokensPossible}, based on the account topology
 * ({@code databaseAccount}), whether multiple write locations are in use, and whether
 * region-scoped session capturing is enabled on the client or via system config.
 *
 * @param databaseAccount the database account whose topology is consulted
 */
private void resetSessionContainerIfNeeded(DatabaseAccount databaseAccount) {
    boolean isRegionScopingOfSessionTokensPossible =
        this.isRegionScopingOfSessionTokensPossible(
            databaseAccount,
            this.useMultipleWriteLocations,
            this.isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig);

    // BUG FIX: this flag was previously computed but never consulted, so the session
    // container was unconditionally replaced (and unconditionally cast below). Only
    // switch to a region-scoped container when region scoping is actually possible.
    if (isRegionScopingOfSessionTokensPossible) {
        this.sessionContainer = new RegionScopedSessionContainer(
            this.serviceEndpoint.getHost(),
            this.sessionCapturingDisabled,
            this.globalEndpointManager);
        this.diagnosticsClientConfig.withRegionScopedSessionContainerOptions(
            (RegionScopedSessionContainer) this.sessionContainer);
    }
}
/**
 * Replaces the session container with a region-scoped implementation, but only when
 * region scoping of session tokens is possible for this account and client configuration.
 *
 * @param databaseAccount the database account whose topology is consulted
 */
private void resetSessionContainerIfNeeded(DatabaseAccount databaseAccount) {
    // Region scoping requires support from the account topology as well as the
    // client/system configuration; bail out early when it is not available.
    final boolean canScopeSessionTokensByRegion =
        this.isRegionScopingOfSessionTokensPossible(
            databaseAccount,
            this.useMultipleWriteLocations,
            this.isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig);

    if (!canScopeSessionTokensByRegion) {
        return;
    }

    RegionScopedSessionContainer regionScopedSessionContainer =
        new RegionScopedSessionContainer(
            this.serviceEndpoint.getHost(),
            this.sessionCapturingDisabled,
            this.globalEndpointManager);

    this.sessionContainer = regionScopedSessionContainer;
    this.diagnosticsClientConfig.withRegionScopedSessionContainerOptions(regionScopedSessionContainer);
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private final static List<String> EMPTY_REGION_LIST = Collections.emptyList(); private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor(); private final static ImplementationBridgeHelpers.FeedResponseHelper.FeedResponseAccessor feedResponseAccessor = ImplementationBridgeHelpers.FeedResponseHelper.getFeedResponseAccessor(); private final static ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final static ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor = ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor(); private final static ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor itemResponseAccessor = ImplementationBridgeHelpers.CosmosItemResponseHelper.getCosmosItemResponseBuilderAccessor(); private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>(); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new 
Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final CosmosItemSerializer defaultCustomSerializer; private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private ISessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxGatewayStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private final ApiType apiType; private final 
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final String clientCorrelationId; private final SessionRetryOptions sessionRetryOptions; private final boolean sessionCapturingOverrideEnabled; private final boolean sessionCapturingDisabled; private final boolean isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, 
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, 
clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length == 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs 
= null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig 
containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { assert(clientTelemetryConfig != null); Boolean clientTelemetryEnabled = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor() .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); assert(clientTelemetryEnabled != null); activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.incrementAndGet(); this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ? String.format("%05d",this.clientId): clientCorrelationId; clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withClientMap(clientMap); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig; this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig); this.sessionRetryOptions = sessionRetryOptions; this.defaultCustomSerializer = defaultCustomSerializer; logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential 
= tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); 
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); this.diagnosticsClientConfig.withMachineId(tempMachineId); this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig); this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions); this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionCapturingDisabled = disableSessionCapturing; this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig = isRegionScopedSessionCapturingEnabled; this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = new ConcurrentHashMap<>(); this.apiType = apiType; this.clientTelemetryConfig = clientTelemetryConfig; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = diagnosticsAccessor.create(this, 
telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig)); this.mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } private DatabaseAccount initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError(); if (databaseRefreshErrorSnapshot != null) { logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot ); throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot); } else { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. 
More info: https: } } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); return databaseAccount; } private boolean isRegionScopingOfSessionTokensPossible(DatabaseAccount databaseAccount, boolean useMultipleWriteLocations, boolean isRegionScopedSessionCapturingEnabled) { if (!isRegionScopedSessionCapturingEnabled) { return false; } if (!useMultipleWriteLocations) { return false; } Iterable<DatabaseAccountLocation> readableLocationsIterable = databaseAccount.getReadableLocations(); Iterator<DatabaseAccountLocation> readableLocationsIterator = readableLocationsIterable.iterator(); while (readableLocationsIterator.hasNext()) { DatabaseAccountLocation readableLocation = readableLocationsIterator.next(); String normalizedReadableRegion = readableLocation.getName().toLowerCase(Locale.ROOT).trim().replace(" ", ""); if (RegionNameToRegionIdMap.getRegionId(normalizedReadableRegion) == -1) { return false; } } return true; } private void updateGatewayProxy() { (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); (this.gatewayProxy).setCollectionCache(this.collectionCache); (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); (this.gatewayProxy).setSessionContainer(this.sessionContainer); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); 
DatabaseAccount databaseAccountSnapshot = this.initializeGatewayConfigurationReader(); this.resetSessionContainerIfNeeded(databaseAccountSnapshot); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); clientTelemetry = new ClientTelemetry( this, null, randomUuid().toString(), ManagementFactory.getRuntimeMXBean().getName(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.configs, this.clientTelemetryConfig, this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init().thenEmpty((publisher) -> { logger.warn( "Initialized DocumentClient [{}] with machineId[{}]" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]", clientId, ClientTelemetry.getMachineId(diagnosticsClientConfig), serviceEndpoint, connectionPolicy, consistencyLevel); }).subscribe(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } this.retryPolicy.setRxCollectionCache(this.collectionCache); ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null ? 
consistencyLevel : this.getDefaultConsistencyLevelOfAccount(); boolean updatedDisableSessionCapturing = (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry, this.globalEndpointManager); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, 
sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString()); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations, this.sessionRetryOptions); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public String getClientCorrelationId() { return this.clientCorrelationId; } @Override public String getMachineId() { if (this.diagnosticsClientConfig == null) { return null; } return ClientTelemetry.getMachineId(diagnosticsClientConfig); } @Override public String getUserAgent() { return this.userAgentContainer.getUserAgent(); } @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return 
mostRecentlyCreatedDiagnostics.get(); } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = database.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> 
toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return qryOptAccessor.getImpl(options).getOperationContextAndListenerTuple(); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T> Flux<FeedResponse<T>> createQuery( String 
parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum) { return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this); }
/* createQuery (6-arg): resolves the query feed link, fixes a correlation activity id (from options or freshly generated), wires an InvalidPartitionExceptionRetryPolicy, and runs createQueryInternal inside a ScopedDiagnosticsFactory so diagnostics are merged into the operation state on success, error, and cancellation alike. */
private <T> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum, DiagnosticsClientContext innerDiagnosticsFactory) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions(); UUID correlationActivityIdOfRequestOptions = qryOptAccessor .getImpl(nonNullQueryOptions) .getCorrelationActivityId(); UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ? correlationActivityIdOfRequestOptions : randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions)); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); state.registerDiagnosticsFactory( diagnosticsFactory::reset, diagnosticsFactory::merge); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal( diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout), invalidPartitionExceptionRetryPolicy ).flatMap(result -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); return Mono.just(result); }) .onErrorMap(throwable -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); 
return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot())); }
/* createQueryInternal: builds the document query execution context (with optional query-plan caching), then for each context executes the query; the first response additionally carries query-plan diagnostics. The tail (end-to-end timeout handling) continues on the following source lines. */
private <T> Flux<FeedResponse<T>> createQueryInternal( DiagnosticsClientContext diagnosticsClientContext, String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId, final AtomicBoolean isQueryCancelledOnTimeout) { Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) { queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); RequestOptions requestOptions = options == null?
null : ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .toRequestOptions(options); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(requestOptions, resourceTypeEnum, OperationType.Query); if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) { return getFeedResponseFluxWithTimeout( feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout, diagnosticsClientContext); } return feedResponseFlux; }, Queues.SMALL_BUFFER_SIZE, 1); } private static void applyExceptionToMergedDiagnosticsForQuery( CosmosQueryRequestOptions requestOptions, CosmosException exception, DiagnosticsClientContext diagnosticsClientContext) { CosmosDiagnostics mostRecentlyCreatedDiagnostics = diagnosticsClientContext.getMostRecentlyCreatedDiagnostics(); if (mostRecentlyCreatedDiagnostics != null) { BridgeInternal.setCosmosDiagnostics( exception, mostRecentlyCreatedDiagnostics); } else { List<CosmosDiagnostics> cancelledRequestDiagnostics = qryOptAccessor .getCancelledRequestDiagnosticsTracker(requestOptions); if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) { CosmosDiagnostics aggregratedCosmosDiagnostics = cancelledRequestDiagnostics .stream() .reduce((first, toBeMerged) -> { ClientSideRequestStatistics clientSideRequestStatistics = ImplementationBridgeHelpers .CosmosDiagnosticsHelper .getCosmosDiagnosticsAccessor() .getClientSideRequestStatisticsRaw(first); ClientSideRequestStatistics toBeMergedClientSideRequestStatistics = ImplementationBridgeHelpers .CosmosDiagnosticsHelper .getCosmosDiagnosticsAccessor() .getClientSideRequestStatisticsRaw(first); if (clientSideRequestStatistics == null) { return toBeMerged; } else { clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics); return first; } }) .get(); BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics); } 
} } private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout( Flux<FeedResponse<T>> feedResponseFlux, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, CosmosQueryRequestOptions requestOptions, final AtomicBoolean isQueryCancelledOnTimeout, DiagnosticsClientContext diagnosticsClientContext) { Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout(); if (endToEndTimeout.isNegative()) { return feedResponseFlux .timeout(endToEndTimeout) .onErrorMap(throwable -> { if (throwable instanceof TimeoutException) { CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout); cancellationException.setStackTrace(throwable.getStackTrace()); isQueryCancelledOnTimeout.set(true); applyExceptionToMergedDiagnosticsForQuery( requestOptions, cancellationException, diagnosticsClientContext); return cancellationException; } return throwable; }); } return feedResponseFlux .timeout(endToEndTimeout) .onErrorMap(throwable -> { if (throwable instanceof TimeoutException) { CosmosException exception = new OperationCancelledException(); exception.setStackTrace(throwable.getStackTrace()); isQueryCancelledOnTimeout.set(true); applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext); return exception; } return throwable; }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) { return queryDatabases(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return 
ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); }
/* createCollectionInternal: validates inputs, serializes the collection (recording serialization diagnostics), builds a Create request for ResourceType.DocumentCollection, and on success records the new collection's session token. The create(...) call continues on the next source line. */
private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = collection.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, 
getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken( request, resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } }
/* replaceCollection: public entry point — replaces a collection, wrapping replaceCollectionInternal in a session-token-reset retry policy. */
@Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); }
/* replaceCollectionInternal: validates and serializes the replacement collection; body and trailing logger.debug string literal continue on the next source line. */
private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = collection.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken( request, resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } }
/* deleteCollection: public entry point — deletes a collection, wrapping deleteCollectionInternal in a session-token-reset retry policy. */
@Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); }
/* deleteCollectionInternal: validates the link, builds a Delete request for ResourceType.DocumentCollection and maps the response. */
private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } }
/* delete: low-level DELETE pipeline — populates headers, updates retry-context end time on retries, then dispatches to the store proxy. Body continues on the next source line. */
private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } 
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); }
/* deleteAllItemsByPartitionKey: low-level POST pipeline for the delete-by-partition-key operation; same header/retry-context handling as delete. */
private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); }
/* read: low-level GET pipeline with retry-context end-time bookkeeping. */
private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); }
/* readFeed: low-level GET pipeline for feed reads (no retry-context bookkeeping). */
Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); }
/* query: low-level POST pipeline for queries; captures the session token from each response. */
private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); }
/* readCollection: public entry point — reads a collection, wrapping readCollectionInternal in a session-token-reset retry policy. Statement continues on the next source line. */
@Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); }
/* readCollectionInternal: validates the link, builds a Read request for ResourceType.DocumentCollection and maps the response. */
private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } }
/* readCollections: reads the collection feed under a database link. */
@Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); }
/* queryCollections (String): convenience overload wrapping the query text in a SqlQuerySpec. */
@Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, QueryFeedOperationState state) { return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection); }
/* queryCollections (SqlQuerySpec): signature continues on the next source line. */
@Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, 
QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection); }
/* serializeProcedureParams: serializes stored-procedure arguments into a JSON array string; JsonSerializable values use their own toJson, everything else goes through the shared ObjectMapper. */
private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ((JsonSerializable) object).toJson(); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); }
/* validateResource: rejects resource ids containing illegal characters or trailing whitespace. NOTE(review): the char literal after the last indexOf appears garbled in this extract (likely '#' in the original) — verify against the upstream source before editing. */
private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } }
/* getRequestHeaders: builds the per-request HTTP header map from client defaults (multi-write, consistency) and RequestOptions (etags, triggers, session token, offer/throughput, dedicated gateway, etc.). With null options only the content-response-on-write preference applies. */
private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = 
this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if (options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { 
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, offer.getOfferAutoScaleSettings().toJson()); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null) { if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) { 
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE, String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed())); } } return headers; }
/* Exposes the session-token-reset retry policy factory. */
public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; }
/* addPartitionKeyInformation (Document overload): resolves the collection from the cache, then delegates to the synchronous overload to stamp partition-key info on the request. */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); }
/* addPartitionKeyInformation (Object overload): same as above but takes a pre-resolved collection observable. */
private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); }
/* addPartitionKeyInformation (core): derives the PartitionKeyInternal from options, an empty definition, or by extracting it from the document payload (recording serialization diagnostics); condition continues on the next source line. */
private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || 
partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (objectDoc instanceof ObjectNode) { internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc); } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.setPartitionKeyDefinition(partitionKeyDefinition); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); }
/* getCreateDocumentRequest: builds the create/upsert document request — serializes the payload (recording diagnostics), applies E2E-timeout and exclude-region hooks, and resolves partition-key info. Parameter list continues on the next source line. */
private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean 
disableAutomaticIdGeneration, OperationType operationType, DiagnosticsClientContext clientContextOverride) { if (StringUtils.isEmpty(documentCollectionLink)) { throw new IllegalArgumentException("documentCollectionLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Instant serializationStartTimeUTC = Instant.now(); String trackingId = null; if (options != null) { trackingId = options.getTrackingId(); } ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, options.getEffectiveItemSerializer(), trackingId); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), operationType, ResourceType.Document, path, requestHeaders, options, content); if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if( options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { 
serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return addPartitionKeyInformation(request, content, document, options, collectionObs); } private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink"); checkNotNull(serverBatchRequest, "expected non null serverBatchRequest"); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody())); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Batch, ResourceType.Document, path, requestHeaders, options, content); if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (requestRetryPolicy != null) { requestRetryPolicy.onBeforeSendRequest(request); } SerializationDiagnosticsContext 
serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    // NOTE(review): exclude-regions is already set in the earlier 'options != null'
    // block above — this second assignment looks redundant; confirm before removing.
    if (options != null) {
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    // Resolve the collection, then stamp the batch-specific headers on the request.
    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
        addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
        return request;
    });
}

/**
 * Stamps batch routing and behavior headers on the request: partition-key (or
 * partition-key-range) routing info plus the is-batch / atomic / continue-on-error flags.
 */
private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                 ServerBatchRequest serverBatchRequest,
                                                 DocumentCollection collection) {
    if (serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
        // Route by logical partition key.
        PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
        PartitionKeyInternal partitionKeyInternal;

        if (partitionKey.equals(PartitionKey.NONE)) {
            // PartitionKey.NONE maps to the collection-specific "none" value.
            PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
        }

        request.setPartitionKeyInternal(partitionKeyInternal);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    } else if (serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
        // Route by physical partition-key-range id.
        request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
    } else {
        throw new UnsupportedOperationException("Unknown Server request.");
    }

    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
    request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
    request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));

    request.setPartitionKeyDefinition(collection.getPartitionKey());
    request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());

    return request;
}

/**
 * NOTE: Caller needs to consume it by subscribing to this Mono in order for the
 * request to populate headers.
 *
 * @param request request to populate headers to
 * @param httpMethod http method
 * @return Mono, which on subscription will populate the headers in the request
 *         passed in the argument.
 */
private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
    // Every request carries the RFC1123 date used for auth signature computation.
    request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());

    if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
            || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
        String resourceName = request.getResourceAddress();

        String authorization = this.getUserAuthorizationToken(
            resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
            AuthorizationTokenType.PrimaryMasterKey, request.properties);
        try {
            // The token is URL-encoded before being placed in the header.
            authorization = URLEncoder.encode(authorization, "UTF-8");
        } catch (UnsupportedEncodingException e) {
            throw new IllegalStateException("Failed to encode authtoken.", e);
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
    }

    if (this.apiType != null) {
        request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
    }

    this.populateCapabilitiesHeader(request);

    // Default content-type per verb, unless the caller already set one.
    if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
    }

    // PATCH uses the JSON-patch media type.
    if (RequestVerb.PATCH.equals(httpMethod)
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
    }

    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
        request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
    }

    MetadataDiagnosticsContext metadataDiagnosticsCtx =
        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

    if (this.requiresFeedRangeFiltering(request)) {
        // Feed-range requests need collection/pk-range info resolved before auth headers.
        return request.getFeedRange()
                      .populateFeedRangeFilteringHeaders(
                          this.getPartitionKeyRangeCache(),
                          request,
                          this.collectionCache
                              .resolveCollectionAsync(metadataDiagnosticsCtx, request)
                              .flatMap(documentCollectionValueHolder -> {
                                  if (documentCollectionValueHolder.v != null) {
                                      request.setPartitionKeyDefinition(documentCollectionValueHolder.v.getPartitionKey());
                                  }
                                  return Mono.just(documentCollectionValueHolder);
                              })
                      )
                      .flatMap(this::populateAuthorizationHeader);
    }

    return this.populateAuthorizationHeader(request);
}

// Advertises the SDK's supported capabilities unless the header is already present.
private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
    if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
        request
            .getHeaders()
            .put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
    }
}

// Only document/conflict feed reads and queries with an explicit feed range need
// feed-range filtering headers.
private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
    if (request.getResourceType() != ResourceType.Document
            && request.getResourceType() != ResourceType.Conflict) {
        return false;
    }

    switch (request.getOperationType()) {
        case ReadFeed:
        case Query:
        case SqlQuery:
            return request.getFeedRange() != null;
        default:
            return false;
    }
}

/**
 * Adds the AAD bearer token to the request when AAD auth is configured;
 * otherwise returns the request unchanged.
 */
@Override
public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
    if (request == null) {
        throw new IllegalArgumentException("request");
    }

    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                                          .map(authorization -> {
                                              request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                                              return request;
                                          });
    } else {
        return Mono.just(request);
    }
}

/**
 * Same as the request-based overload, but mutates a raw {@link HttpHeaders} set.
 */
@Override
public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
    if (httpHeaders == null) {
        throw new IllegalArgumentException("httpHeaders");
    }

    if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
        return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                                          .map(authorization -> {
                                              httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                                              return httpHeaders;
                                          });
    }

    return Mono.just(httpHeaders);
}

@Override
public AuthorizationTokenType getAuthorizationTokenType() {
    return this.authorizationTokenType;
}

/**
 * Resolves the auth token for a request, trying in order: a user-supplied token
 * resolver, a key credential, a single resource token, then the resource-token map.
 */
@Override
public String getUserAuthorizationToken(String resourceName,
                                        ResourceType resourceType,
                                        RequestVerb requestVerb,
                                        Map<String, String> headers,
                                        AuthorizationTokenType tokenType,
                                        Map<String, Object> properties) {

    if (this.cosmosAuthorizationTokenResolver != null) {
        // Hand the caller an unmodifiable view of the properties, if any.
        return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(),
            resourceName, this.resolveCosmosResourceType(resourceType).toString(),
            properties != null ? Collections.unmodifiableMap(properties) : null);
    } else if (credential != null) {
        return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb,
            resourceName, resourceType, headers);
    } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
        // A single resource token is used verbatim.
        return masterKeyOrResourceToken;
    } else {
        assert resourceTokensMap != null;
        if (resourceType.equals(ResourceType.DatabaseAccount)) {
            return this.firstResourceTokenFromPermissionFeed;
        }

        return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
    }
}

// Maps a service resource type to the public enum; unknown values fall back to SYSTEM.
private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
    CosmosResourceType cosmosResourceType =
        ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
    if (cosmosResourceType == null) {
        return CosmosResourceType.SYSTEM;
    }
    return cosmosResourceType;
}

// Records the session token from a response for session-consistency bookkeeping.
void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
    this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
}

// POSTs the request through the store proxy, updating retry timing bookkeeping.
private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {
    return populateHeadersAsync(request, RequestVerb.POST)
        .flatMap(requestPopulated -> {
            RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);

            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
        });
}

// Like create(), but sets the upsert header and captures the session token.
private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                               DocumentClientRetryPolicy documentClientRetryPolicy,
                                               OperationContextAndListenerTuple operationContextAndListenerTuple) {

    return populateHeadersAsync(request, RequestVerb.POST)
.flatMap(requestPopulated -> {
            Map<String, String> headers = requestPopulated.getHeaders();
            // headers can never be null, since it will be initialized even when no
            // request options are specified.
            assert (headers != null);
            // The upsert header turns this POST into create-or-replace on the service.
            headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");

            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                .map(response -> {
                        this.captureSessionToken(requestPopulated, response);
                        return response;
                    }
                );
        });
}

// PUTs the request through the store proxy, updating retry timing bookkeeping.
private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PUT)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}

// PATCHes the request through the store proxy, updating retry timing bookkeeping.
private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
    return populateHeadersAsync(request, RequestVerb.PATCH)
        .flatMap(requestPopulated -> {
            if (documentClientRetryPolicy.getRetryContext() != null
                    && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                documentClientRetryPolicy.getRetryContext().updateEndTime();
            }

            return getStoreProxy(requestPopulated).processMessage(requestPopulated);
        });
}

/**
 * Creates a document in the given collection, wrapped in the configured
 * availability strategy (cross-region hedging for reads / opted-in writes).
 */
@Override
public Mono<ResourceResponse<Document>> createDocument(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Create,
        (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
            collectionLink,
            document,
            opt,
            disableAutomaticIdGeneration,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

// Sets up retry policy, scoped diagnostics and the E2E timeout wrapper for a create.
private Mono<ResourceResponse<Document>> createDocumentCore(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Without a pk we may hit the wrong partition after a split — add mismatch retry.
        requestRetryPolicy =
            new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }

    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(() ->
            createDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            requestRetryPolicy),
        scopedDiagnosticsFactory
    );
}

// Builds the create request and dispatches it; failures surface as Mono.error.
private Mono<ResourceResponse<Document>> createDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy requestRetryPolicy,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document,
            options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);

        return requestObs
            .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Wraps a point-operation Mono with the end-to-end latency policy: applies the
 * operation timeout and maps timeouts to {@link OperationCancelledException}.
 * A negative configured timeout fails immediately.
 */
private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
    RequestOptions requestOptions,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    Mono<T> rxDocumentServiceResponseMono,
    ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

    requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);

    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
        if (endToEndTimeout.isNegative()) {
            // Make sure there is at least one diagnostics instance to attach to the error.
            CosmosDiagnostics latestCosmosDiagnosticsSnapshot =
                scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (latestCosmosDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
        }

        return rxDocumentServiceResponseMono
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                scopedDiagnosticsFactory,
                throwable,
                requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
    }

    return rxDocumentServiceResponseMono;
}

/**
 * Converts a reactor {@link TimeoutException} into an {@link OperationCancelledException}
 * carrying the latest diagnostics; any other throwable is passed through untouched.
 */
private static Throwable getCancellationExceptionForPointOperations(
    ScopedDiagnosticsFactory scopedDiagnosticsFactory,
    Throwable throwable,
    AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {

    Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
    if
(unwrappedException instanceof TimeoutException) { CosmosException exception = new OperationCancelledException(); exception.setStackTrace(throwable.getStackTrace()); Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get(); if (actualCallback != null) { logger.trace("Calling actual Mark E2E timeout callback"); actualCallback.run(); } CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(); if (lastDiagnosticsSnapshot == null) { scopedDiagnosticsFactory.createDiagnostics(); } BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics()); return exception; } return throwable; } private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) { checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null"); checkArgument( negativeTimeout.isNegative(), "This exception should only be used for negative timeouts"); String message = String.format("Negative timeout '%s' provided.", negativeTimeout); CosmosException exception = new OperationCancelledException(message, null); BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED); if (cosmosDiagnostics != null) { BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics); } return exception; } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Upsert, (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore( collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> upsertDocumentCore( String collectionLink, Object document, RequestOptions 
options,
    boolean disableAutomaticIdGeneration,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
    if (nonNullRequestOptions.getPartitionKey() == null) {
        // Without a pk we may hit the wrong partition after a split — add mismatch retry.
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
    }
    DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertDocumentInternal(
                collectionLink,
                document,
                nonNullRequestOptions,
                disableAutomaticIdGeneration,
                finalRetryPolicyInstance,
                scopedDiagnosticsFactory),
            finalRetryPolicyInstance),
        scopedDiagnosticsFactory
    );
}

// Builds the upsert request (same shape as create) and dispatches it with IS_UPSERT set.
private Mono<ResourceResponse<Document>> upsertDocumentInternal(
    String collectionLink,
    Object document,
    RequestOptions options,
    boolean disableAutomaticIdGeneration,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    try {
        logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);

        Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(
            retryPolicyInstance,
            collectionLink,
            document,
            options,
            disableAutomaticIdGeneration,
            OperationType.Upsert,
            clientContextOverride);

        return reqObs
            .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
            .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

    } catch (Exception e) {
        logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/**
 * Replaces the document at the given link with the provided payload, wrapped in
 * the configured availability strategy.
 */
@Override
public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                        RequestOptions options) {

    return wrapPointOperationWithAvailabilityStrategy(
        ResourceType.Document,
        OperationType.Replace,
        (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
            documentLink,
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

// Sets up retry policy, scoped diagnostics and the E2E timeout wrapper for a replace.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    String documentLink,
    Object document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); if (nonNullRequestOptions.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( documentLink, document, nonNullRequestOptions, finalRequestRetryPolicy, endToEndPolicyConfig, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = Document.fromObject(document, options.getEffectiveItemSerializer()); return this.replaceDocumentInternal( documentLink, typedDocument, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Replace, (opt, e2ecfg, clientCtxOverride) -> 
replaceDocumentCore(
            document,
            opt,
            e2ecfg,
            clientCtxOverride),
        options,
        options != null && options.getNonIdempotentWriteRetriesEnabled()
    );
}

// Self-link variant: sets up mismatch retry (when no pk is supplied) and delegates.
private Mono<ResourceResponse<Document>> replaceDocumentCore(
    Document document,
    RequestOptions options,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    DocumentClientRetryPolicy requestRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride);
    if (options == null || options.getPartitionKey() == null) {
        String collectionLink = document.getSelfLink();
        requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
            collectionCache,
            requestRetryPolicy,
            collectionLink,
            options);
    }
    DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;

    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceDocumentInternal(
            document,
            options,
            finalRequestRetryPolicy,
            endToEndPolicyConfig,
            clientContextOverride),
        requestRetryPolicy);
}

// Validates and forwards to the (link, document) overload using the self-link.
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
    DiagnosticsClientContext clientContextOverride) {

    try {
        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        return this.replaceDocumentInternal(
            document.getSelfLink(),
            document,
            options,
            retryPolicyInstance,
            clientContextOverride);

    } catch (Exception e) {
        // NOTE(review): message says "database" but this is a document replace —
        // likely a copy/paste slip in the log text.
        logger.debug("Failure in replacing a database due to [{}]", e.getMessage());
        return Mono.error(e);
    }
}

// Builds and issues the actual Replace request: serializes the document (stamping
// the tracking id when present), wires diagnostics/timeout hooks, resolves the
// collection for partition-key stamping, then PUTs via replace().
private Mono<ResourceResponse<Document>> replaceDocumentInternal(
    String documentLink,
    Document document,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    if (document == null) {
        throw new IllegalArgumentException("document");
    }

    logger.debug("Replacing a Document. documentLink: [{}]", documentLink);
    final String path = Utils.joinPath(documentLink, null);
    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Replace);

    Instant serializationStartTimeUTC = Instant.now();
    Consumer<Map<String, Object>> onAfterSerialization = null;
    if (options != null) {
        String trackingId = options.getTrackingId();

        if (trackingId != null && !trackingId.isEmpty()) {
            // Stamp the tracking id into the serialized payload for write tracing.
            onAfterSerialization = (node) -> node.put(Constants.Properties.TRACKING_ID, trackingId);
        }
    }
    // NOTE(review): 'options' is dereferenced unconditionally here although the guard
    // above treats it as nullable — confirm callers always pass non-null options.
    ByteBuffer content = document.serializeJsonToByteBuffer(options.getEffectiveItemSerializer(), onAfterSerialization);
    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        getEffectiveClientContext(clientContextOverride),
        OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook used by the end-to-end timeout machinery to flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs .flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig( RequestOptions options, ResourceType resourceType, OperationType operationType) { return this.getEffectiveEndToEndOperationLatencyPolicyConfig( options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null, resourceType, operationType); } private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig( CosmosEndToEndOperationLatencyPolicyConfig policyConfig, ResourceType resourceType, OperationType operationType) { if (policyConfig != null) { return policyConfig; } if (resourceType != ResourceType.Document) { return null; } if (!operationType.isPointOperation() && Configs.isDefaultE2ETimeoutDisabledForNonPointOperations()) { return null; } return this.cosmosEndToEndOperationLatencyPolicyConfig; } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Patch, (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore( documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> patchDocumentCore( String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = 
options != null ? options : new RequestOptions();
    ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
    DocumentClientRetryPolicy documentClientRetryPolicy =
        this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);

    return getPointOperationResponseMonoWithE2ETimeout(
        nonNullRequestOptions,
        endToEndPolicyConfig,
        ObservableHelper.inlineIfPossibleAsObs(
            () -> patchDocumentInternal(
                documentLink,
                cosmosPatchOperations,
                nonNullRequestOptions,
                documentClientRetryPolicy,
                scopedDiagnosticsFactory),
            documentClientRetryPolicy),
        scopedDiagnosticsFactory
    );
}

// Builds and issues the actual Patch request: serializes the patch operations,
// wires diagnostics/timeout hooks, resolves the collection for pk stamping,
// then PATCHes via patch().
private Mono<ResourceResponse<Document>> patchDocumentInternal(
    String documentLink,
    CosmosPatchOperations cosmosPatchOperations,
    RequestOptions options,
    DocumentClientRetryPolicy retryPolicyInstance,
    DiagnosticsClientContext clientContextOverride) {

    checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
    checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

    logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

    final String path = Utils.joinPath(documentLink, null);

    final Map<String, String> requestHeaders =
        getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
    Instant serializationStartTimeUTC = Instant.now();

    ByteBuffer content = ByteBuffer.wrap(
        PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

    Instant serializationEndTime = Instant.now();
    SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
        new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

    final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        clientContextOverride,
        OperationType.Patch,
        ResourceType.Document,
        path,
        requestHeaders,
        options,
        content);

    if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
        request.setNonIdempotentWriteRetriesEnabled(true);
    }
    if (options != null) {
        // Hook used by the end-to-end timeout machinery to flag this request as cancelled.
        options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
            () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
        request.requestContext.setExcludeRegions(options.getExcludeRegions());
    }

    if (retryPolicyInstance != null) {
        retryPolicyInstance.onBeforeSendRequest(request);
    }

    SerializationDiagnosticsContext serializationDiagnosticsContext =
        BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
    if (serializationDiagnosticsContext != null) {
        serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
    }

    Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

    // Patch carries no document body for pk extraction — pk comes from options/collection.
    Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
        request, null, null, options, collectionObs);

    return requestObs
        .flatMap(req -> patch(request,
retryPolicyInstance)) .map(resp -> toResourceResponse(resp, Document.class)); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Delete, (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore( documentLink, null, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Delete, (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore( documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> deleteDocumentCore( String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> deleteDocumentInternal( documentLink, internalObjectNode, nonNullRequestOptions, requestRetryPolicy, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> deleteDocumentInternal( String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( 
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, internalObjectNode, options, collectionObs); return requestObs .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { return readDocument(documentLink, options, this); } private Mono<ResourceResponse<Document>> readDocument( String documentLink, RequestOptions options, DiagnosticsClientContext innerDiagnosticsFactory) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Read, (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride), options, false, innerDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> readDocumentCore( String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions();

        /* Tail of readDocumentCore: route the read through a session-token-reset
           retry policy and wrap it with the end-to-end timeout policy; diagnostics
           are captured in a scoped factory so they can later be merged into the
           operation's diagnostics context. */
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> readDocumentInternal(
                    documentLink,
                    nonNullRequestOptions,
                    retryPolicyInstance,
                    scopedDiagnosticsFactory),
                retryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }

    /* Builds and issues the service request for a point read of a single
       document: resolves the owning collection, attaches partition-key
       information, then executes the read and maps the wire response to a
       ResourceResponse<Document>. Argument/IO failures surface as Mono.error. */
    private Mono<ResourceResponse<Document>> readDocumentInternal(
        String documentLink,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }
            logger.debug("Reading a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Read, ResourceType.Document, path, requestHeaders, options);

            /* NOTE(review): unlike deleteDocumentInternal, 'options' is
               dereferenced here without a null guard; the only caller in view
               (readDocumentCore) always passes a non-null RequestOptions —
               confirm before reusing this method from a new call site. */
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

            /* NOTE(review): the lambda ignores 'req' and passes the captured
               'request' — presumably addPartitionKeyInformation emits the same
               (mutated) instance, so they are identical; verify. */
            return requestObs.flatMap(req -> this.read(request,
retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } } @Override public <T> Flux<FeedResponse<T>> readDocuments( String collectionLink, QueryFeedOperationState state, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT); } @Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, QueryFeedOperationState state, Class<T> klass) { final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true); state.registerDiagnosticsFactory( () -> {}, (ctx) -> diagnosticsFactory.merge(ctx) ); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono .flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = 
collectionRoutingMapValueHolder.v; if (routingMap == null) { return Mono.error(new IllegalStateException("Failed to get routing map.")); } itemIdentityList .forEach(itemIdentity -> { if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) && ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey()) .getComponents().size() != pkDefinition.getPaths().size()) { throw new IllegalArgumentException(RMResources.PartitionKeyMismatch); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } }); Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); Flux<FeedResponse<T>> pointReads = pointReadsForReadMany( diagnosticsFactory, partitionRangeItemKeyMap, resourceLink, state.getQueryOptions(), klass); Flux<FeedResponse<T>> queries = queryForReadMany( diagnosticsFactory, resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), state.getQueryOptions(), klass, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap)); return Flux.merge(pointReads, queries) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection(); double requestCharge = 0; for (FeedResponse<T> page : 
feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults()); aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics())); } CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics); diagnosticsAccessor.addClientSideDiagnosticsToFeed( aggregatedDiagnostics, aggregateRequestStatistics); state.mergeDiagnosticsContext(); CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); if (ctx != null) { ctxAccessor.recordOperation( ctx, 200, 0, finalList.size(), requestCharge, aggregatedDiagnostics, null ); diagnosticsAccessor .setDiagnosticsContext( aggregatedDiagnostics, ctx); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponseWithQueryMetrics( finalList, headers, aggregatedQueryMetrics, null, false, false, aggregatedDiagnostics); return frp; }); }) .onErrorMap(throwable -> { if (throwable instanceof CosmosException) { CosmosException cosmosException = (CosmosException)throwable; CosmosDiagnostics diagnostics = cosmosException.getDiagnostics(); if (diagnostics != null) { state.mergeDiagnosticsContext(); CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); if (ctx != null) { ctxAccessor.recordOperation( ctx, cosmosException.getStatusCode(), cosmosException.getSubStatusCode(), 0, cosmosException.getRequestCharge(), diagnostics, throwable ); diagnosticsAccessor .setDiagnosticsContext( diagnostics, state.getDiagnosticsContextSnapshot()); } } return cosmosException; } return throwable; }); } ); } private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, 
PartitionKeyDefinition partitionKeyDefinition) {
        /* Tail of getRangeQueryMap: for every partition-key range that owns more
           than one requested item, build one SQL query covering those items.
           Ranges holding exactly one item are served by a point read instead
           (see pointReadsForReadMany), so they get no entry in this map. */
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
        List<String> partitionKeySelectors = createPkSelectors(partitionKeyDefinition);
        for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {
            SqlQuerySpec sqlQuerySpec;
            List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue();
            if (cosmosItemIdentityList.size() > 1) {
                /* When the container's sole partition-key path is "id", id and
                   partition key coincide, so a simple IN-clause query suffices. */
                if (partitionKeySelectors.size() == 1 && partitionKeySelectors.get(0).equals("[\"id\"]")) {
                    sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(cosmosItemIdentityList);
                } else {
                    sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelectors);
                }
                rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
            }
        }
        return rangeQueryMap;
    }

    /* Builds "SELECT * FROM c WHERE c.id IN ( @param0, ... )" with one SQL
       parameter per requested id; used only when the partition key is the id
       itself (see the "[\"id\"]" check in getRangeQueryMap above). */
    private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<CosmosItemIdentity> idPartitionKeyPairList) {
        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
        for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
            CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + i;
            parameters.add(new SqlParameter(idParamName, idValue));
            queryStringBuilder.append(idParamName);

            /* No trailing comma after the last id. */
            if (i < idPartitionKeyPairList.size() - 1) {
                queryStringBuilder.append(", ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    /* Builds a fully parameterized disjunction of per-item predicates:
       "( (c.id = @p AND c[pk0] = @q AND ...) OR ... )", one equality per
       partition-key component, so hierarchical (multi-path) partition keys
       are supported. Every value travels as a SqlParameter, never inlined. */
    private SqlQuerySpec createReadManyQuerySpec(
        List<CosmosItemIdentity> itemIdentities,
        List<String> partitionKeySelectors) {
        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE ( ");
        int paramCount = 0;
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);

            PartitionKey pkValueAsPartitionKey =
itemIdentity.getPartitionKey();
            Object[] pkValues = ModelBridgeInternal.getPartitionKeyInternal(pkValueAsPartitionKey).toObjectArray();
            List<List<String>> partitionKeyParams = new ArrayList<>();
            int pathCount = 0;
            /* One (selector, parameter-name) pair per partition-key component,
               in path order; each pair becomes an equality predicate below. */
            for (Object pkComponentValue : pkValues) {
                String pkParamName = "@param" + paramCount;
                partitionKeyParams.add(Arrays.asList(partitionKeySelectors.get(pathCount), pkParamName));
                parameters.add(new SqlParameter(pkParamName, pkComponentValue));
                paramCount++;
                pathCount++;
            }

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + paramCount;
            paramCount++;
            parameters.add(new SqlParameter(idParamName, idValue));

            /* Emit "(c.id = @idParam AND c[sel] = @pkParam ...)" for this item. */
            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);
            for (List<String> pkParam: partitionKeyParams) {
                queryStringBuilder.append(" AND ");
                queryStringBuilder.append(" c");
                queryStringBuilder.append(pkParam.get(0));
                queryStringBuilder.append((" = "));
                queryStringBuilder.append(pkParam.get(1));
            }
            queryStringBuilder.append(" )");
            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    /* Converts each partition-key definition path into an index selector of
       the form ["segment"], used to address the property in generated SQL. */
    private List<String> createPkSelectors(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            .map(pathPart -> StringUtils.substring(pathPart, 1)) /* drop the path's first character (its leading separator) */
            /* NOTE(review): this replaces an embedded double quote with a lone
               backslash rather than an escaped quote (\") — looks off; confirm
               the intended escaping for pk path segments containing quotes. */
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.toList());
    }

    /* Executes the per-partition readMany queries (one SqlQuerySpec per range
       in rangeQueryMap) and returns the resulting pages. Returns an empty Flux
       when every requested range was handled via point reads. When an
       end-to-end latency policy is enabled, the flux is wrapped with a timeout
       (see the endToEndPolicyConfig handling that follows). */
    private <T> Flux<FeedResponse<T>> queryForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DocumentCollection collection,
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

        if (rangeQueryMap.isEmpty()) {
            return Flux.empty();
        }

        UUID activityId = randomUuid();
        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options)); Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory.createReadManyQueryAsync( diagnosticsFactory, queryClient, collection.getResourceId(), sqlQuery, rangeQueryMap, options, collection.getResourceId(), parentResourceLink, activityId, klass, resourceTypeEnum, isQueryCancelledOnTimeout); Flux<FeedResponse<T>> feedResponseFlux = executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync); RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .toRequestOptions(options); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(requestOptions, ResourceType.Document, OperationType.Query); if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) { return getFeedResponseFluxWithTimeout( feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout, diagnosticsFactory); } return feedResponseFlux; } private <T> Flux<FeedResponse<T>> pointReadsForReadMany( ScopedDiagnosticsFactory diagnosticsFactory, Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap, String resourceLink, CosmosQueryRequestOptions queryRequestOptions, Class<T> klass) { CosmosItemSerializer effectiveItemSerializer = getEffectiveItemSerializer(queryRequestOptions); return Flux.fromIterable(singleItemPartitionRequestMap.values()) .flatMap(cosmosItemIdentityList -> { if (cosmosItemIdentityList.size() == 1) { CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0); RequestOptions requestOptions = ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .toRequestOptions(queryRequestOptions); requestOptions.setPartitionKey(firstIdentity.getPartitionKey()); return 
this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory) .flatMap(resourceResponse -> Mono.just( new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null) )) .onErrorResume(throwable -> { Throwable unwrappedThrowable = Exceptions.unwrap(throwable); if (unwrappedThrowable instanceof CosmosException) { CosmosException cosmosException = (CosmosException) unwrappedThrowable; int statusCode = cosmosException.getStatusCode(); int subStatusCode = cosmosException.getSubStatusCode(); if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) { return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException)); } } return Mono.error(unwrappedThrowable); }); } return Mono.empty(); }) .flatMap(resourceResponseToExceptionPair -> { ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft(); CosmosException cosmosException = resourceResponseToExceptionPair.getRight(); FeedResponse<T> feedResponse; if (cosmosException != null) { feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders()); diagnosticsAccessor.addClientSideDiagnosticsToFeed( feedResponse.getCosmosDiagnostics(), Collections.singleton( BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics()))); } else { CosmosItemResponse<T> cosmosItemResponse = itemResponseAccessor.createCosmosItemResponse(resourceResponse, klass, effectiveItemSerializer); feedResponse = ModelBridgeInternal.createFeedResponse( Arrays.asList(cosmosItemResponse.getItem()), cosmosItemResponse.getResponseHeaders()); diagnosticsAccessor.addClientSideDiagnosticsToFeed( feedResponse.getCosmosDiagnostics(), Collections.singleton( BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics()))); } return Mono.just(feedResponse); }); } @Override public <T> Flux<FeedResponse<T>> queryDocuments( 
String collectionLink,
        String query,
        QueryFeedOperationState state,
        Class<T> classOfT) {
        /* Tail of queryDocuments(String): delegate to the SqlQuerySpec overload. */
        return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
    }

    /* Resolves the item serializer to use, in precedence order:
       request-level serializer (if provided), then the client's default
       custom serializer, then the SDK default serializer. Never returns null. */
    @Override
    public CosmosItemSerializer getEffectiveItemSerializer(CosmosItemSerializer requestOptionsItemSerializer) {

        if (requestOptionsItemSerializer != null) {
            return requestOptionsItemSerializer;
        }

        if (this.defaultCustomSerializer != null) {
            return this.defaultCustomSerializer;
        }

        return CosmosItemSerializer.DEFAULT_SERIALIZER;
    }

    /* Convenience overload: extracts the custom serializer from query request
       options (which may be null) and delegates to the overload above.
       NOTE(review): the type parameter <T> is unused. */
    private <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosQueryRequestOptions queryRequestOptions) {
        CosmosItemSerializer requestOptionsItemSerializer =
            queryRequestOptions != null ? queryRequestOptions.getCustomItemSerializer() : null;

        return this.getEffectiveItemSerializer(requestOptionsItemSerializer);
    }

    /* Convenience overload: extracts the custom serializer from item request
       options (which may be null) and delegates to the overload above.
       NOTE(review): the type parameter <T> is unused. */
    private <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosItemRequestOptions itemRequestOptions) {
        CosmosItemSerializer requestOptionsItemSerializer = itemRequestOptions != null ?
itemRequestOptions.getCustomItemSerializer() : null; return this.getEffectiveItemSerializer(requestOptionsItemSerializer); } private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { if (operationContextAndListenerTuple == null) { return RxDocumentClientImpl.this.query(request).single(); } else { final OperationListener listener = operationContextAndListenerTuple.getOperationListener(); final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext(); request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId()); listener.requestListener(operationContext, request); return RxDocumentClientImpl.this.query(request).single().doOnNext( response -> listener.responseListener(operationContext, response) ).doOnError( ex -> listener.exceptionListener(operationContext, ex) ); } } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( ResourceType resourceType, 
OperationType operationType, Supplier<DocumentClientRetryPolicy> retryPolicyFactory, RxDocumentServiceRequest req, BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) { return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy( resourceType, operationType, retryPolicyFactory, req, feedOperation ); } @Override public <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosQueryRequestOptions queryRequestOptions) { return RxDocumentClientImpl.this.getEffectiveItemSerializer(queryRequestOptions); } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public <T> Flux<FeedResponse<T>> queryDocuments( String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state, Class<T> classOfT) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document); } @Override public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions, Class<T> classOfT) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, classOfT, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) { return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT); } @Override public <T> Flux<FeedResponse<T>> readAllDocuments( String collectionLink, PartitionKey partitionKey, QueryFeedOperationState state, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey 
== null) { throw new IllegalArgumentException("partitionKey"); } final CosmosQueryRequestOptions effectiveOptions = qryOptAccessor.clone(state.getQueryOptions()); RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig(); List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation( endToEndPolicyConfig, ResourceType.Document, OperationType.Query, false, nonNullRequestOptions); ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false); if (orderedApplicableRegionsForSpeculation.size() < 2) { state.registerDiagnosticsFactory( () -> {}, (ctx) -> diagnosticsFactory.merge(ctx)); } else { state.registerDiagnosticsFactory( () -> diagnosticsFactory.reset(), (ctx) -> diagnosticsFactory.merge(ctx)); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create( diagnosticsFactory, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); List<String> partitionKeySelectors = createPkSelectors(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, partitionKeySelectors); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, 
getOperationContextAndListenerTuple(state.getQueryOptions())); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions)); Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { return Mono.error(new IllegalStateException("Failed to get routing map.")); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( diagnosticsFactory, resourceLink, querySpec, ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()), classOfT, ResourceType.Document, queryClient, activityId, isQueryCancelledOnTimeout); }); }, invalidPartitionExceptionRetryPolicy); if (orderedApplicableRegionsForSpeculation.size() < 2) { return innerFlux; } return innerFlux .flatMap(result -> { diagnosticsFactory.merge(nonNullRequestOptions); return Mono.just(result); }) .onErrorMap(throwable -> { diagnosticsFactory.merge(nonNullRequestOptions); return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions)); }); } @Override public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String 
collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, 
operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, 
// (continuation of replaceStoredProcedureInternal — parameter list begun on the previous line)
// Replaces a stored procedure by its self-link; validates the resource first.
                                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

        RxDocumentClientImpl.validateResource(storedProcedure);

        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Deletes a stored procedure; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}

// Performs the stored-procedure delete against the given link.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options,
                                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads a stored procedure; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the stored-procedure read against the given link.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options,
                                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, StoredProcedure.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads the stored-procedure feed of a collection.
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class,
        Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}

// Query overload taking a raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 QueryFeedOperationState state) {
    return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
}

// Queries stored procedures of a collection via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec,
                                                                 QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}

// Executes a stored procedure with the given parameters.
// (the lambda's argument list continues on the next line)
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options,
                                                            List<Object> procedureParams) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams,
// (continuation of executeStoredProcedure — closes the internal call begun on the previous line)
            documentClientRetryPolicy),
        documentClientRetryPolicy);
}

// Executes a transactional batch request against a collection.
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy,
            disableAutomaticIdGeneration),
        documentClientRetryPolicy);
}

// Builds and issues the ExecuteJavaScript request; serializes parameters into the body,
// captures the session token from the response, and converts it to a StoredProcedureResponse.
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                     RequestOptions options,
                                                                     List<Object> procedureParams,
                                                                     DocumentClientRetryPolicy retryPolicy) {
    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);

        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure,
            OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

        // Empty parameter list serializes to an empty request body.
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ?
                RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);

        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda ignores 'req' and reuses the outer 'request' — presumably
        // addPartitionKeyInformation mutates and returns the same instance; confirm.
        return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));
    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Issues the batch request and parses the service response into a CosmosBatchResponse.
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {
    try {
        logger.debug("Executing a Batch request with number of operations {}",
            serverBatchRequest.getOperations().size());

        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink,
            serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy,
                getOperationContextAndListenerTuple(options)));

        return responseObservable
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse,
                serverBatchRequest, true));
    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}

// Creates a trigger; wraps the internal call with a fresh retry policy.
// (the lambda's argument list continues on the next line)
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink,
// (continuation of createTrigger — closes the internal call begun on the previous line)
        trigger, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the trigger create.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]",
            collectionLink, trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Builds a service request targeting the triggers path of the given collection.
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger,
                                                   RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }

    RxDocumentClientImpl.validateResource(trigger);

    String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
        trigger, requestHeaders, options);
}

// Replaces a trigger; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}

// Replaces a trigger by its self-link; validates the resource first.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }

        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);

        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Deletes a trigger; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the trigger delete against the given link.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }

        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads a trigger; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the trigger read against the given link.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }

        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads the trigger feed of a collection.
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class,
        Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}

// Query overload taking a raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 QueryFeedOperationState state) {
    return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}

// Queries triggers of a collection via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}

// Creates a user-defined function; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf,
                                                                             RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance),
        retryPolicyInstance);
}

// (signature continues on the next line with the remaining parameters)
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink,
                                                                                      UserDefinedFunction
// (continuation of createUserDefinedFunctionInternal — parameter list begun on the previous line)
// Performs the UDF create.
                                                                                      udf,
                                                                                      RequestOptions options,
                                                                                      DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]",
            collectionLink, udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Replaces a user-defined function; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf,
                                                                              RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}

// Replaces a UDF by its self-link; validates the resource first.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf,
                                                                                       RequestOptions options,
                                                                                       DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }

        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);

        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Deletes a user-defined function; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink,
                                                                             RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the UDF delete against the given link.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink,
                                                                                      RequestOptions options,
                                                                                      DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }

        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads a user-defined function; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink,
                                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the UDF read against the given link.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink,
                                                                                    RequestOptions options,
                                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }

        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads the UDF feed of a collection.
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink,
                                                                        QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
        Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}

// Query overload taking a raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink, String query, QueryFeedOperationState state) {
    return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
}

// Queries UDFs of a collection via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class,
        ResourceType.UserDefinedFunction);
}

// Reads a conflict; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

// (declaration continues on the next line)
private
// (continuation — the 'private' modifier of this declaration is on the previous line)
// Performs the conflict read; partition key information is resolved before issuing the request.
        Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }

        logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Conflict, path, requestHeaders, options);

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);

        // NOTE(review): the lambda ignores 'req' and reuses the outer 'request' — presumably
        // addPartitionKeyInformation mutates and returns the same instance; confirm.
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.read(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads the conflict feed of a collection.
@Override
public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class,
        Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT));
}

// Query overload taking a raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query,
                                                   QueryFeedOperationState state) {
    return queryConflicts(collectionLink, new SqlQuerySpec(query), state);
}

// Queries conflicts of a collection via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec,
                                                   QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict);
}

// Deletes a conflict; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the conflict delete; partition key information is resolved before issuing the request.
private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(conflictLink)) {
            throw new IllegalArgumentException("conflictLink");
        }

        logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
        String path = Utils.joinPath(conflictLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);

        // NOTE(review): same 'request' vs 'req' pattern as readConflictInternal — confirm intentional.
        return reqObs.flatMap(req -> {
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, Conflict.class));
        });
    } catch (Exception e) {
        logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Creates a user; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy),
        documentClientRetryPolicy);
}

// Performs the user create. (the logger message continues on the next line)
private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a User.
// (continuation of createUserInternal — completes the logger call begun on the previous line)
// NOTE(review): unlike upsertUserInternal below, this path never calls
// documentClientRetryPolicy.onBeforeSendRequest(request) — confirm whether that is intentional.
                     databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Upserts a user; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the user upsert.
private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
        RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Builds a service request targeting the users path of the given database.
private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (user == null) {
        throw new IllegalArgumentException("user");
    }

    RxDocumentClientImpl.validateResource(user);

    String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path,
        user, requestHeaders, options);
}

// Replaces a user; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
}

// Replaces a user by its self-link; validates the resource first.
private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options,
                                                         DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (user == null) {
            throw new IllegalArgumentException("user");
        }

        logger.debug("Replacing a User. user id [{}]", user.getId());
        RxDocumentClientImpl.validateResource(user);

        String path = Utils.joinPath(user.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User,
            OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Deletes a user; wraps the internal call with a fresh retry policy.
// NOTE(review): this method has no @Override annotation, unlike its siblings — confirm
// whether it overrides an interface method and should carry one.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the user delete against the given link.
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                        DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }

        logger.debug("Deleting a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User,
            OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.User, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads a user; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Performs the user read against the given link.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options,
                                                      DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(userLink)) {
            throw new IllegalArgumentException("userLink");
        }

        logger.debug("Reading a User. userLink [{}]", userLink);
        String path = Utils.joinPath(userLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User,
            OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.User, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance)
            .map(response -> toResourceResponse(response, User.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Reads the user feed of a database.
@Override
public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }

    return nonDocumentReadFeed(state, ResourceType.User, User.class,
        Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
}

// Query overload taking a raw query string; delegates to the SqlQuerySpec overload.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
    return queryUsers(databaseLink, new SqlQuerySpec(query), state);
}

// Queries users of a database via the shared createQuery pipeline.
@Override
public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec,
                                           QueryFeedOperationState state) {
    return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
}

// Reads a client encryption key; wraps the internal call with a fresh retry policy.
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink,
                                                                           RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(
        () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance),
        retryPolicyInstance);
}

// Performs the client-encryption-key read. (the condition continues on the next line)
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink,
                                                                                    RequestOptions options,
                                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if
(StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Creates a ClientEncryptionKey under the given database. Wrapper builds the retry policy and
// defers to the internal method; errors (including argument validation) flow out as Mono.error.
@Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); }
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. 
databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Validates arguments and assembles the service request for a ClientEncryptionKey operation.
// Throws IllegalArgumentException synchronously; callers invoke this inside try blocks.
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); }
// Replaces an existing ClientEncryptionKey addressed by its name-based link.
@Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); }
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy 
retryPolicyInstance) { try { if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Reads all ClientEncryptionKeys of a database as a paged feed.
@Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys( String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); }
// Queries ClientEncryptionKeys with a SqlQuerySpec through the shared createQuery pipeline.
@Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys( String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); }
// Creates a Permission under the given user link (continuation of the call is on the next line).
@Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> 
createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null)); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, QueryFeedOperationState state) { return queryPermissions(userLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing 
an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( QueryFeedOperationState state, ResourceType resourceType, Class<T> klass, String resourceLink) { return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy), retryPolicy); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink, DocumentClientRetryPolicy retryPolicy) { final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions(); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions); int maxPageSize = maxItemCount != null ? 
maxItemCount : -1; assert(resourceType != ResourceType.Document); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> readFeed(request) .map(response -> feedResponseAccessor.createFeedResponse( response, CosmosItemSerializer.DEFAULT_SERIALIZER, klass)); return Paginator .getPaginatedQueryResultAsObservable( nonNullOptions, createRequestFunc, executeFunc, maxPageSize); }
// Queries Offers; String overload wraps the raw query text in a SqlQuerySpec.
@Override public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) { return queryOffers(new SqlQuerySpec(query), state); }
@Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer); }
// Reads the account-level DatabaseAccount resource (empty resource path).
@Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); }
private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, 
documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Simple accessors for internal collaborators (session container, caches, endpoint manager).
public ISessionContainer getSession() { return this.sessionContainer; }
public void setSession(ISessionContainer sessionContainer) { this.sessionContainer = sessionContainer; }
@Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; }
@Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; }
@Override public GlobalEndpointManager getGlobalEndpointManager() { return this.globalEndpointManager; }
@Override public AddressSelector getAddressSelector() { return new AddressSelector(this.addressResolver, this.configs.getProtocol()); }
// Fetches the DatabaseAccount from a specific endpoint (overriding the default endpoint) via
// the gateway; as a side effect updates useMultipleWriteLocations from the returned account.
public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); }
/** * Certain requests must be routed through gateway even when the client connectivity mode is direct. 
* * @param request * @return RxStoreModel */
// Routing table: metadata resources (offers, encryption keys, scripts except execute,
// partition key ranges/partition-key deletes) and most create/delete/replace/read operations
// on databases, users, collections and permissions go through the gateway; document traffic
// and partition-key-scoped queries go to the direct store model.
private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.useGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } }
// Idempotent shutdown: closed.getAndSet(true) guarantees the teardown sequence runs once.
@Override public void close() { logger.info("Attempting to close client {}", 
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } }
// Lazily initializes the ThroughputControlStore on first use (compareAndSet guard), wires it
// into the direct store model or gateway proxy depending on connection mode, then registers
// the group. Synchronized so concurrent group registrations see a fully initialized store.
@Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) { this.storeModel.enableThroughputControl(throughputControlStore); } else { this.gatewayProxy.enableThroughputControl(throughputControlStore); } } this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono); }
// Delegates proactive connection warm-up / cache init to the store model.
@Override public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) { return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig); }
@Override public ConsistencyLevel getDefaultConsistencyLevelOfAccount() { return this.gatewayConfigurationReader.getDefaultConsistencyLevel(); }
/*** * Configure fault injector provider. * * @param injectorProvider the fault injector provider. 
*/
// In DIRECT mode the injector is wired into both store model and address resolver; the
// gateway proxy is always configured (metadata requests go through it in both modes).
@Override public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) { checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null"); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) { this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs); this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs); } this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs); }
// Bookkeeping callbacks forwarded to the store model for proactive-init telemetry.
@Override public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities); }
@Override public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities); }
@Override public String getMasterKeyOrResourceToken() { return this.masterKeyOrResourceToken; }
// Builds "SELECT * FROM c WHERE c<sel0> = @pkValue0 AND c<sel1> = @pkValue1 ..." for a
// logical-partition scan, one parameterized equality term per partition key path component.
private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, List<String> partitionKeySelectors) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object[] pkValues = ModelBridgeInternal.getPartitionKeyInternal(partitionKey).toObjectArray(); String pkParamNamePrefix = "@pkValue"; for (int i = 0; i < pkValues.length; i++) { StringBuilder subQueryStringBuilder = new StringBuilder(); String sqlParameterName = pkParamNamePrefix + i; if (i > 0) { subQueryStringBuilder.append(" AND "); } subQueryStringBuilder.append(" c"); subQueryStringBuilder.append(partitionKeySelectors.get(i)); subQueryStringBuilder.append((" = ")); subQueryStringBuilder.append(sqlParameterName); parameters.add(new SqlParameter(sqlParameterName, pkValues[i])); queryStringBuilder.append(subQueryStringBuilder); } return new 
SqlQuerySpec(queryStringBuilder.toString(), parameters); }
// Resolves the feed ranges (one per physical partition) of a collection. Wrapped in an
// InvalidPartitionExceptionRetryPolicy so a stale name cache triggers a refresh and retry.
@Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink, forceRefresh), invalidPartitionExceptionRetryPolicy); }
// Resolves the collection, then fetches all overlapping partition key ranges for the full
// range and maps them to FeedRange instances.
private Mono<List<FeedRange>> getFeedRangesInternal( RxDocumentServiceRequest request, String collectionLink, boolean forceRefresh) { logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, forceRefresh, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); }
private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> 
partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; }
private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); }
/** * Creates a type 4 (pseudo randomly generated) UUID. * <p> * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator. * * @return A randomly generated {@link UUID}. */ public static UUID randomUuid() { return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); }
// Forces the RFC 4122 version-4/variant-2 bits onto the raw random longs before wrapping.
static UUID randomUuid(long msb, long lsb) { msb &= 0xffffffffffff0fffL; msb |= 0x0000000000004000L; lsb &= 0x3fffffffffffffffL; lsb |= 0x8000000000000000L; return new UUID(msb, lsb); }
// Availability-strategy wrappers for document point operations. The short overload uses this
// client itself as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled) { return wrapPointOperationWithAvailabilityStrategy( resourceType, operationType, callback, initialRequestOptions, idempotentWriteRetriesEnabled, this ); }
// Full overload: validates inputs, then (continued on following lines) either executes the
// callback directly when hedging does not apply, or races per-region attempts.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled, DiagnosticsClientContext innerDiagnosticsFactory) { checkNotNull(resourceType, "Argument 'resourceType' must not be null."); checkNotNull(operationType, "Argument 'operationType' must not be null."); checkNotNull(callback, "Argument 'callback' must not be null."); final RequestOptions nonNullRequestOptions = initialRequestOptions != null ? 
initialRequestOptions : new RequestOptions(); checkArgument( resourceType == ResourceType.Document, "This method can only be used for document point operations."); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions, resourceType, operationType); List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, idempotentWriteRetriesEnabled, nonNullRequestOptions); if (orderedApplicableRegionsForSpeculation.size() < 2) { return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>(); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); orderedApplicableRegionsForSpeculation .forEach(region -> { RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions); if (monoList.isEmpty()) { Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions = callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory) .map(NonTransientPointOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientPointOperationResult( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedOptions.setExcludeRegions( getEffectiveExcludedRegionsForHedging( nonNullRequestOptions.getExcludeRegions(), orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono = 
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory) .map(NonTransientPointOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientPointOperationResult( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) .delaySubscription(delayForCrossRegionalRetry)); } else { monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { diagnosticsFactory.merge(nonNullRequestOptions); if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { CosmosException cosmosException = Utils.as(innerException, CosmosException.class); diagnosticsFactory.merge(nonNullRequestOptions); return cosmosException; } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( 
message, innerException ); } index++; } } diagnosticsFactory.merge(nonNullRequestOptions); return exception; }) .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions)); }
// True when the unwrapped throwable is a CosmosException (first-region hedging filter).
private static boolean isCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); return unwrappedException instanceof CosmosException; }
// True when the unwrapped throwable is a CosmosException whose status/sub-status marks it
// non-transient for hedging (so the hedged attempt's failure can be surfaced as the result).
private static boolean isNonTransientCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); if (!(unwrappedException instanceof CosmosException)) { return false; } CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class); return isNonTransientResultForHedging( cosmosException.getStatusCode(), cosmosException.getSubStatusCode()); }
// Excludes every applicable region except the current one (plus caller-supplied exclusions)
// so each hedged attempt is pinned to exactly one region.
private List<String> getEffectiveExcludedRegionsForHedging( List<String> initialExcludedRegions, List<String> applicableRegions, String currentRegion) { List<String> effectiveExcludedRegions = new ArrayList<>(); if (initialExcludedRegions != null) { effectiveExcludedRegions.addAll(initialExcludedRegions); } for (String applicableRegion: applicableRegions) { if (!applicableRegion.equals(currentRegion)) { effectiveExcludedRegions.add(applicableRegion); } } return effectiveExcludedRegions; }
// Classifies a status/sub-status pair as a definitive (non-retriable-by-hedging) outcome:
// any success (<400), client operation timeout, and a fixed set of client-error codes.
private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) { if (statusCode < HttpConstants.StatusCodes.BADREQUEST) { return true; } if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) { return true; } if (statusCode == HttpConstants.StatusCodes.BADREQUEST || statusCode == HttpConstants.StatusCodes.CONFLICT || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) { return true; } if (statusCode == HttpConstants.StatusCodes.NOTFOUND && 
subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) { return true; } return false; } private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) { if (clientContextOverride != null) { return clientContextOverride; } return this; } /** * Returns the applicable endpoints ordered by preference list if any * @param operationType - the operationT * @return the applicable endpoints ordered by preference list if any */ private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) { if (operationType.isReadOnlyOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions)); } else if (operationType.isWriteOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions)); } return EMPTY_ENDPOINT_LIST; } private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) { if (orderedEffectiveEndpointsList == null) { return EMPTY_ENDPOINT_LIST; } int i = 0; while (i < orderedEffectiveEndpointsList.size()) { if (orderedEffectiveEndpointsList.get(i) == null) { orderedEffectiveEndpointsList.remove(i); } else { i++; } } return orderedEffectiveEndpointsList; } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, RequestOptions options) { return getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, isIdempotentWriteRetriesEnabled, options.getExcludeRegions()); } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, List<String> excludedRegions) { if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) { return EMPTY_REGION_LIST; 
} if (resourceType != ResourceType.Document) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) { return EMPTY_REGION_LIST; } if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) { return EMPTY_REGION_LIST; } List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions); HashSet<String> normalizedExcludedRegions = new HashSet<>(); if (excludedRegions != null) { excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT))); } List<String> orderedRegionsForSpeculation = new ArrayList<>(); endpoints.forEach(uri -> { String regionName = this.globalEndpointManager.getRegionName(uri, operationType); if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) { orderedRegionsForSpeculation.add(regionName); } }); return orderedRegionsForSpeculation; } private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( final ResourceType resourceType, final OperationType operationType, final Supplier<DocumentClientRetryPolicy> retryPolicyFactory, final RxDocumentServiceRequest req, final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation ) { checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null."); checkNotNull(req, "Argument 'req' must not be null."); assert(resourceType == ResourceType.Document); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = this.getEffectiveEndToEndOperationLatencyPolicyConfig( req.requestContext.getEndToEndOperationLatencyPolicyConfig(), resourceType, operationType); List<String> initialExcludedRegions = req.requestContext.getExcludeRegions(); List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, 
operationType, false, initialExcludedRegions ); if (orderedApplicableRegionsForSpeculation.size() < 2) { return feedOperation.apply(retryPolicyFactory, req); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>(); orderedApplicableRegionsForSpeculation .forEach(region -> { RxDocumentServiceRequest clonedRequest = req.clone(); if (monoList.isEmpty()) { Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedRequest.requestContext.setExcludeRegions( getEffectiveExcludedRegionsForHedging( initialExcludedRegions, orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) 
.delaySubscription(delayForCrossRegionalRetry)); } else { monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { return Utils.as(innerException, CosmosException.class); } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( message, innerException ); } index++; } } return exception; }); } @FunctionalInterface private interface DocumentPointOperation { Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride); } private static class NonTransientPointOperationResult { private final ResourceResponse<Document> response; private final CosmosException exception; public NonTransientPointOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientPointOperationResult(ResourceResponse<Document> response) { checkNotNull(response, "Argument 'response' 
must not be null."); this.exception = null; this.response = response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public ResourceResponse<Document> getResponse() { return this.response; } } private static class NonTransientFeedOperationResult<T> { private final T response; private final CosmosException exception; public NonTransientFeedOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientFeedOperationResult(T response) { checkNotNull(response, "Argument 'response' must not be null."); this.exception = null; this.response = response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public T getResponse() { return this.response; } } private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext { private final AtomicBoolean isMerged = new AtomicBoolean(false); private final DiagnosticsClientContext inner; private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics; private final boolean shouldCaptureAllFeedDiagnostics; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) { checkNotNull(inner, "Argument 'inner' must not be null."); this.inner = inner; this.createdDiagnostics = new ConcurrentLinkedQueue<>(); this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics; } @Override public DiagnosticsClientConfig getConfig() { return inner.getConfig(); } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = inner.createDiagnostics(); createdDiagnostics.add(diagnostics); mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } @Override 
public String getUserAgent() { return inner.getUserAgent(); } @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return this.mostRecentlyCreatedDiagnostics.get(); } public void merge(RequestOptions requestOptions) { CosmosDiagnosticsContext knownCtx = null; if (requestOptions != null) { CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot(); if (ctxSnapshot != null) { knownCtx = requestOptions.getDiagnosticsContextSnapshot(); } } merge(knownCtx); } public void merge(CosmosDiagnosticsContext knownCtx) { if (!isMerged.compareAndSet(false, true)) { return; } CosmosDiagnosticsContext ctx = null; if (knownCtx != null) { ctx = knownCtx; } else { for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() != null) { ctx = diagnostics.getDiagnosticsContext(); break; } } } if (ctx == null) { return; } for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) { if (this.shouldCaptureAllFeedDiagnostics && diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) { AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics); if (isCaptured != null) { isCaptured.set(true); } } ctxAccessor.addDiagnostics(ctx, diagnostics); } } } public void reset() { this.createdDiagnostics.clear(); this.isMerged.set(false); } } }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private final static List<String> EMPTY_REGION_LIST = Collections.emptyList(); private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor(); private final static ImplementationBridgeHelpers.FeedResponseHelper.FeedResponseAccessor feedResponseAccessor = ImplementationBridgeHelpers.FeedResponseHelper.getFeedResponseAccessor(); private final static ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final static ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor = ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor(); private final static ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor itemResponseAccessor = ImplementationBridgeHelpers.CosmosItemResponseHelper.getCosmosItemResponseBuilderAccessor(); private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>(); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new 
Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final CosmosItemSerializer defaultCustomSerializer; private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private ISessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxGatewayStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private final ApiType apiType; private final 
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final String clientCorrelationId; private final SessionRetryOptions sessionRetryOptions; private final boolean sessionCapturingOverrideEnabled; private final boolean sessionCapturingDisabled; private final boolean isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, 
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, 
clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length == 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs 
= null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig 
containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { assert(clientTelemetryConfig != null); Boolean clientTelemetryEnabled = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor() .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); assert(clientTelemetryEnabled != null); activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.incrementAndGet(); this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ? String.format("%05d",this.clientId): clientCorrelationId; clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withClientMap(clientMap); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig; this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig); this.sessionRetryOptions = sessionRetryOptions; this.defaultCustomSerializer = defaultCustomSerializer; logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential 
= tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); 
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); this.diagnosticsClientConfig.withMachineId(tempMachineId); this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig); this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions); this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionCapturingDisabled = disableSessionCapturing; this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig = isRegionScopedSessionCapturingEnabled; this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = new ConcurrentHashMap<>(); this.apiType = apiType; this.clientTelemetryConfig = clientTelemetryConfig; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = diagnosticsAccessor.create(this, 
telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig)); this.mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } private DatabaseAccount initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError(); if (databaseRefreshErrorSnapshot != null) { logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot ); throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot); } else { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. 
More info: https: } } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); return databaseAccount; } private boolean isRegionScopingOfSessionTokensPossible(DatabaseAccount databaseAccount, boolean useMultipleWriteLocations, boolean isRegionScopedSessionCapturingEnabled) { if (!isRegionScopedSessionCapturingEnabled) { return false; } if (!useMultipleWriteLocations) { return false; } Iterable<DatabaseAccountLocation> readableLocationsIterable = databaseAccount.getReadableLocations(); Iterator<DatabaseAccountLocation> readableLocationsIterator = readableLocationsIterable.iterator(); while (readableLocationsIterator.hasNext()) { DatabaseAccountLocation readableLocation = readableLocationsIterator.next(); String normalizedReadableRegion = readableLocation.getName().toLowerCase(Locale.ROOT).trim().replace(" ", ""); if (RegionNameToRegionIdMap.getRegionId(normalizedReadableRegion) == -1) { return false; } } return true; } private void updateGatewayProxy() { (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); (this.gatewayProxy).setCollectionCache(this.collectionCache); (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); (this.gatewayProxy).setSessionContainer(this.sessionContainer); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); 
DatabaseAccount databaseAccountSnapshot = this.initializeGatewayConfigurationReader(); this.resetSessionContainerIfNeeded(databaseAccountSnapshot); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); clientTelemetry = new ClientTelemetry( this, null, randomUuid().toString(), ManagementFactory.getRuntimeMXBean().getName(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.configs, this.clientTelemetryConfig, this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init().thenEmpty((publisher) -> { logger.warn( "Initialized DocumentClient [{}] with machineId[{}]" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]", clientId, ClientTelemetry.getMachineId(diagnosticsClientConfig), serviceEndpoint, connectionPolicy, consistencyLevel); }).subscribe(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } this.retryPolicy.setRxCollectionCache(this.collectionCache); ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null ? 
consistencyLevel : this.getDefaultConsistencyLevelOfAccount(); boolean updatedDisableSessionCapturing = (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry, this.globalEndpointManager); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, 
sessionContainer,
            consistencyLevel,
            queryCompatibilityMode,
            userAgentContainer,
            globalEndpointManager,
            httpClient,
            apiType);
    }

    /**
     * Builds the gateway HTTP client from the connection policy (idle timeout, pool size,
     * proxy, network request timeout). When connection sharing across clients is enabled,
     * a shared singleton instance is returned instead of a per-client one; only the
     * non-shared path records the client config into the diagnostics client config.
     */
    private HttpClient httpClient() {
        HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs)
            .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout())
            .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize())
            .withProxy(this.connectionPolicy.getProxy())
            .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout());

        if (connectionSharingAcrossClientsEnabled) {
            // Shared instance is keyed on the config; diagnostics registration happens inside.
            return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig);
        } else {
            diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString());
            return HttpClient.createFixed(httpClientConfig);
        }
    }

    // Creates the direct-mode (TCP) store model. NOTE(review): the 'subscribeRntbdStatus'
    // parameter is not read in this visible body — presumably kept for interface
    // compatibility with callers; confirm before removing.
    private void createStoreModel(boolean subscribeRntbdStatus) {
        StoreClient storeClient = this.storeClientFactory.createStoreClient(this,
            this.addressResolver,
            this.sessionContainer,
            this.gatewayConfigurationReader,
            this,
            this.useMultipleWriteLocations,
            this.sessionRetryOptions);
        this.storeModel = new ServerStoreModel(storeClient);
    }

    // --- simple accessors over client-level configuration/state ---

    @Override
    public URI getServiceEndpoint() {
        return this.serviceEndpoint;
    }

    @Override
    public ConnectionPolicy getConnectionPolicy() {
        return this.connectionPolicy;
    }

    @Override
    public boolean isContentResponseOnWriteEnabled() {
        return contentResponseOnWriteEnabled;
    }

    @Override
    public ConsistencyLevel getConsistencyLevel() {
        return consistencyLevel;
    }

    @Override
    public ClientTelemetry getClientTelemetry() {
        return this.clientTelemetry;
    }

    @Override
    public String getClientCorrelationId() {
        return this.clientCorrelationId;
    }

    @Override
    public String getMachineId() {
        // Machine id is derived from the diagnostics client config; null-safe for
        // partially-initialized clients.
        if (this.diagnosticsClientConfig == null) {
            return null;
        }
        return ClientTelemetry.getMachineId(diagnosticsClientConfig);
    }

    @Override
    public String getUserAgent() {
        return this.userAgentContainer.getUserAgent();
    }

    @Override
    public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
        return
mostRecentlyCreatedDiagnostics.get(); } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = database.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> 
toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) {
        return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT);
    }

    /**
     * Maps a parent resource link plus a child resource type to the feed/query link
     * for that child collection (e.g. a database link + Document -> ".../docs").
     * Root-scoped resource types (Database, Offer) ignore the parent link.
     *
     * @throws IllegalArgumentException for resource types with no query feed.
     */
    private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) {
        switch (resourceTypeEnum) {
            case Database:
                return Paths.DATABASES_ROOT;
            case DocumentCollection:
                return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT);
            case Document:
                return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT);
            case Offer:
                return Paths.OFFERS_ROOT;
            case User:
                return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT);
            case ClientEncryptionKey:
                return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
            case Permission:
                return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT);
            case Attachment:
                return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT);
            case StoredProcedure:
                return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
            case Trigger:
                return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT);
            case UserDefinedFunction:
                return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
            case Conflict:
                return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT);
            default:
                throw new IllegalArgumentException("resource type not supported");
        }
    }

    // Null-safe extraction of the operation context/listener pair from query options.
    private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) {
        if (options == null) {
            return null;
        }
        return qryOptAccessor.getImpl(options).getOperationContextAndListenerTuple();
    }

    // Overload for plain RequestOptions; same null-safe contract as above.
    private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) {
        if (options == null) {
            return null;
        }
        return options.getOperationContextAndListenerTuple();
    }

    private <T> Flux<FeedResponse<T>> createQuery(
        String
parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum) { return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this); } private <T> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum, DiagnosticsClientContext innerDiagnosticsFactory) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions(); UUID correlationActivityIdOfRequestOptions = qryOptAccessor .getImpl(nonNullQueryOptions) .getCorrelationActivityId(); UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ? correlationActivityIdOfRequestOptions : randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions)); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); state.registerDiagnosticsFactory( diagnosticsFactory::reset, diagnosticsFactory::merge); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal( diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout), invalidPartitionExceptionRetryPolicy ).flatMap(result -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); return Mono.just(result); }) .onErrorMap(throwable -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); 
return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot())); } private <T> Flux<FeedResponse<T>> createQueryInternal( DiagnosticsClientContext diagnosticsClientContext, String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId, final AtomicBoolean isQueryCancelledOnTimeout) { Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) { queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); RequestOptions requestOptions = options == null? 
null : ImplementationBridgeHelpers
                .CosmosQueryRequestOptionsHelper
                .getCosmosQueryRequestOptionsAccessor()
                .toRequestOptions(options);

            // Only wrap the result flux with a client-side timeout when an
            // end-to-end latency policy is configured and enabled for this query.
            CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
                getEndToEndOperationLatencyPolicyConfig(requestOptions, resourceTypeEnum, OperationType.Query);

            if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
                return getFeedResponseFluxWithTimeout(
                    feedResponseFlux,
                    endToEndPolicyConfig,
                    options,
                    isQueryCancelledOnTimeout,
                    diagnosticsClientContext);
            }

            return feedResponseFlux;
        }, Queues.SMALL_BUFFER_SIZE, 1);
    }

    /**
     * Attaches diagnostics to a query exception (typically a timeout/cancellation).
     * Prefers the most recently created diagnostics on the client context; otherwise
     * falls back to merging all diagnostics tracked for requests that were cancelled
     * on timeout into a single aggregate before attaching it.
     *
     * @param requestOptions query options carrying the cancelled-request diagnostics tracker
     * @param exception the exception to decorate with diagnostics (mutated in place)
     * @param diagnosticsClientContext source of the most recently created diagnostics
     */
    private static void applyExceptionToMergedDiagnosticsForQuery(
        CosmosQueryRequestOptions requestOptions,
        CosmosException exception,
        DiagnosticsClientContext diagnosticsClientContext) {

        CosmosDiagnostics mostRecentlyCreatedDiagnostics =
            diagnosticsClientContext.getMostRecentlyCreatedDiagnostics();

        if (mostRecentlyCreatedDiagnostics != null) {
            BridgeInternal.setCosmosDiagnostics(exception, mostRecentlyCreatedDiagnostics);
        } else {
            List<CosmosDiagnostics> cancelledRequestDiagnostics =
                qryOptAccessor.getCancelledRequestDiagnosticsTracker(requestOptions);

            if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) {
                CosmosDiagnostics aggregatedCosmosDiagnostics = cancelledRequestDiagnostics
                    .stream()
                    .reduce((first, toBeMerged) -> {
                        ClientSideRequestStatistics clientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(first);

                        // BUG FIX: the original passed 'first' here too, so each reduce
                        // step merged the accumulator's statistics into itself and every
                        // element after the first was silently dropped from the aggregate.
                        ClientSideRequestStatistics toBeMergedClientSideRequestStatistics =
                            ImplementationBridgeHelpers
                                .CosmosDiagnosticsHelper
                                .getCosmosDiagnosticsAccessor()
                                .getClientSideRequestStatisticsRaw(toBeMerged);

                        if (clientSideRequestStatistics == null) {
                            return toBeMerged;
                        } else {
                            clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics);
                            return first;
                        }
                    })
                    .get(); // safe: list verified non-empty above

                BridgeInternal.setCosmosDiagnostics(exception, aggregatedCosmosDiagnostics);
            }
} // end else: fell back to the cancelled-request diagnostics tracker
    } // closes applyExceptionToMergedDiagnosticsForQuery

    /**
     * Applies the configured end-to-end operation timeout to a query's FeedResponse flux.
     * On timeout, marks the query as cancelled, merges tracked diagnostics into the
     * resulting exception, and surfaces it as an OperationCancelledException (or, for a
     * negative configured timeout, a dedicated negative-timeout exception).
     * NOTE(review): a negative Duration passed to Flux.timeout fires immediately —
     * presumably intentional fail-fast behavior; confirm against the policy contract.
     */
    private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout(
        Flux<FeedResponse<T>> feedResponseFlux,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        CosmosQueryRequestOptions requestOptions,
        final AtomicBoolean isQueryCancelledOnTimeout,
        DiagnosticsClientContext diagnosticsClientContext) {

        Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();

        if (endToEndTimeout.isNegative()) {
            return feedResponseFlux
                .timeout(endToEndTimeout)
                .onErrorMap(throwable -> {
                    if (throwable instanceof TimeoutException) {
                        CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout);
                        // keep the original timeout's stack for diagnosability
                        cancellationException.setStackTrace(throwable.getStackTrace());
                        isQueryCancelledOnTimeout.set(true);
                        applyExceptionToMergedDiagnosticsForQuery(
                            requestOptions, cancellationException, diagnosticsClientContext);
                        return cancellationException;
                    }
                    return throwable;
                });
        }

        return feedResponseFlux
            .timeout(endToEndTimeout)
            .onErrorMap(throwable -> {
                if (throwable instanceof TimeoutException) {
                    CosmosException exception = new OperationCancelledException();
                    exception.setStackTrace(throwable.getStackTrace());
                    isQueryCancelledOnTimeout.set(true);
                    applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext);
                    return exception;
                }
                return throwable;
            });
    }

    // String-query convenience overload; delegates to the SqlQuerySpec overload.
    @Override
    public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) {
        return queryDatabases(new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database);
    }

    @Override
    public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink,
                                                                       DocumentCollection collection, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return
ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = collection.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, 
getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken( request, resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = collection.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken( request, resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } 
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }

    /**
     * Sends the delete-all-items-by-partition-key operation as a POST through the
     * store proxy, after populating standard request headers. Updates the retry
     * context end time when this request is a retry.
     */
    private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request,
                                                                         DocumentClientRetryPolicy documentClientRetryPolicy,
                                                                         OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
                // A positive retry count means this send is a retry; close out the
                // previous attempt's timing window.
                if (documentClientRetryPolicy.getRetryContext() != null
                        && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }

    // GET pipeline: populate headers, track retry timing, dispatch via store proxy.
    private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request,
                                                 DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null
                        && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    // Read-feed GET; no retry-context bookkeeping in this path.
    Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) {
        return populateHeadersAsync(request, RequestVerb.GET)
            .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated));
    }

    // Query POST; captures the session token off the response before returning it.
    private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated ->
                this.getStoreProxy(requestPopulated).processMessage(requestPopulated)
                    .map(response -> {
                        this.captureSessionToken(requestPopulated, response);
                        return response;
                    }
                ));
    }

    @Override
    public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink,
                                                                     RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance =
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, QueryFeedOperationState state) { return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, 
QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ((JsonSerializable) object).toJson(); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = 
this.contentResponseOnWriteEnabled;
        // Per-request override of the client-level content-response-on-write setting.
        if (options.isContentResponseOnWriteEnabled() != null) {
            contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled();
        }
        if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) {
            headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL);
        }
        // Optimistic-concurrency and consistency headers.
        if (options.getIfMatchETag() != null) {
            headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag());
        }
        if (options.getIfNoneMatchETag() != null) {
            headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag());
        }
        if (options.getConsistencyLevel() != null) {
            headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString());
        }
        if (options.getIndexingDirective() != null) {
            headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString());
        }
        // Pre/post trigger lists are sent as comma-joined header values.
        if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) {
            String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ",");
            headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude);
        }
        if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) {
            String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ",");
            headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude);
        }
        if (!Strings.isNullOrEmpty(options.getSessionToken())) {
            headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken());
        }
        if (options.getResourceTokenExpirySeconds() != null) {
            headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds()));
        }
        // Throughput: explicit offer throughput wins over a named offer type.
        if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) {
            headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString());
        } else if (options.getOfferType() != null) {
            headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType());
        }
        // ThroughputProperties path: manual (fixed) and autoscale settings are mutually exclusive.
        if (options.getOfferThroughput() == null) {
            if (options.getThroughputProperties() != null) {
                Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties());
                final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings();
                OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null;
                if (offerAutoscaleSettings != null) {
                    autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties();
                }
                if (offer.hasOfferThroughput() &&
                    (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 ||
                        autoscaleAutoUpgradeProperties != null &&
                            autoscaleAutoUpgradeProperties
                                .getAutoscaleThroughputProperties()
                                .getIncrementPercent() >= 0)) {
                    throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer");
                }
                if (offer.hasOfferThroughput()) {
                    headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput()));
                } else if (offer.getOfferAutoScaleSettings() != null) {
                    headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, offer.getOfferAutoScaleSettings().toJson());
                }
            }
        }
        if (options.isQuotaInfoEnabled()) {
            headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true));
        }
        if (options.isScriptLoggingEnabled()) {
            headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true));
        }
        // Dedicated gateway (integrated cache) options.
        if (options.getDedicatedGatewayRequestOptions() != null) {
            if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) {
                headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS,
                    String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions())));
            }
            if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) {
                headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE,
                    String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()));
            }
        }
        return headers;
    }

    public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
        return this.resetSessionTokenRetryPolicy;
    }

    // Resolves the collection, then stamps partition-key information onto the request (Document overload).
    private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
        ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) {
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return collectionObs
            .map(collectionValueHolder -> {
                addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
                return request;
            });
    }

    // Same as above, but the caller supplies the (possibly cached) collection lookup.
    private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request,
        ByteBuffer contentAsByteBuffer, Object document, RequestOptions options,
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) {
        return collectionObs.map(collectionValueHolder -> {
            addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v);
            return request;
        });
    }

    // Synchronous core: determines the effective PartitionKeyInternal (explicit option, NONE sentinel,
    // empty for non-partitioned collections, or extracted from the document body) and sets it on the request.
    private void addPartitionKeyInformation(RxDocumentServiceRequest request,
        ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options,
        DocumentCollection collection) {
        PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
        PartitionKeyInternal partitionKeyInternal = null;
        if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){
            partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
        } else if (options != null && options.getPartitionKey() != null) {
            partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey());
        } else if (partitionKeyDefinition == null ||
partitionKeyDefinition.getPaths().size() == 0) {
            // Non-partitioned collection: use the empty partition key.
            partitionKeyInternal = PartitionKeyInternal.getEmpty();
        } else if (contentAsByteBuffer != null || objectDoc != null) {
            InternalObjectNode internalObjectNode;
            if (objectDoc instanceof InternalObjectNode) {
                internalObjectNode = (InternalObjectNode) objectDoc;
            } else if (objectDoc instanceof ObjectNode) {
                internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
            } else if (contentAsByteBuffer != null) {
                // Rewind before parsing: the buffer may already have been consumed by serialization.
                contentAsByteBuffer.rewind();
                internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
            } else {
                throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
            }
            // Partition-key extraction is timed and recorded in the serialization diagnostics.
            Instant serializationStartTime = Instant.now();
            partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
            Instant serializationEndTime = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTime,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
            );
            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }
        } else {
            throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
        }
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.setPartitionKeyDefinition(partitionKeyDefinition);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    }

    // Builds the Create/Upsert document request: serializes the document, attaches headers, wires the
    // E2E-timeout cancellation hook and excluded regions, then resolves the partition key asynchronously.
    private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
        String documentCollectionLink,
        Object document,
        RequestOptions options,
        boolean
        disableAutomaticIdGeneration,
        OperationType operationType,
        DiagnosticsClientContext clientContextOverride) {
        if (StringUtils.isEmpty(documentCollectionLink)) {
            throw new IllegalArgumentException("documentCollectionLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        Instant serializationStartTimeUTC = Instant.now();
        String trackingId = null;
        if (options != null) {
            trackingId = options.getTrackingId();
        }
        // NOTE(review): options is null-checked for trackingId above but dereferenced unconditionally here —
        // possible NPE when options == null; confirm that all callers pass non-null options.
        ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, options.getEffectiveItemSerializer(), trackingId);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            operationType, ResourceType.Document, path, requestHeaders, options, content);
        if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if( options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return addPartitionKeyInformation(request, content, document, options, collectionObs);
    }

    // Builds the transactional-batch request: wraps the pre-serialized batch body and resolves the
    // collection so batch headers (partition key / PK range) can be attached.
    private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
        String documentCollectionLink,
        ServerBatchRequest serverBatchRequest,
        RequestOptions options,
        boolean disableAutomaticIdGeneration) {
        checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
        checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this,
            OperationType.Batch,
            ResourceType.Document,
            path,
            requestHeaders,
            options,
            content);
        if (options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext
serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // NOTE(review): excluded regions were already set in the options != null block above — this second
        // assignment looks redundant; confirm before removing.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
            addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
            return request;
        });
    }

    // Attaches batch-routing headers: a single-PK batch gets the partition-key header, a PK-range batch
    // gets a PartitionKeyRangeIdentity; any other batch flavor is rejected.
    private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
        ServerBatchRequest serverBatchRequest,
        DocumentCollection collection) {
        if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
            PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
            PartitionKeyInternal partitionKeyInternal;
            if (partitionKey.equals(PartitionKey.NONE)) {
                PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
                partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
            } else {
                partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
            }
            request.setPartitionKeyInternal(partitionKeyInternal);
            request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
        } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
            request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
        } else {
            throw new UnsupportedOperationException("Unknown Server request.");
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST,
            Boolean.TRUE.toString());
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
        request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
        request.setPartitionKeyDefinition(collection.getPartitionKey());
        request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
        return request;
    }

    /**
     * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
     * @param request request to populate headers to
     * @param httpMethod http method
     * @return Mono, which on subscription will populate the headers in the request passed in the argument.
     */
    private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
        request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
        // Key-based auth header (master key / resource token / token resolver / credential).
        if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
            || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
            String resourceName = request.getResourceAddress();
            String authorization = this.getUserAuthorizationToken(
                resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
                AuthorizationTokenType.PrimaryMasterKey, request.properties);
            try {
                authorization = URLEncoder.encode(authorization, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new IllegalStateException("Failed to encode authtoken.", e);
            }
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
        }
        if (this.apiType != null) {
            request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
        }
        this.populateCapabilitiesHeader(request);
        // Default content-type/accept headers; caller-supplied values are never overwritten.
        if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
        }
        if (RequestVerb.PATCH.equals(httpMethod) &&
            !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
        }
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        }
        MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
        // Feed-range reads need range-filtering headers (which require the resolved collection) before auth.
        if (this.requiresFeedRangeFiltering(request)) {
            return request.getFeedRange()
                .populateFeedRangeFilteringHeaders(
                    this.getPartitionKeyRangeCache(),
                    request,
                    this.collectionCache
                        .resolveCollectionAsync(metadataDiagnosticsCtx, request)
                        .flatMap(documentCollectionValueHolder -> {
                            if (documentCollectionValueHolder.v != null) {
                                request.setPartitionKeyDefinition(documentCollectionValueHolder.v.getPartitionKey());
                            }
                            return Mono.just(documentCollectionValueHolder);
                        })
                )
                .flatMap(this::populateAuthorizationHeader);
        }
        return this.populateAuthorizationHeader(request);
    }

    // Adds the SDK capabilities header unless the caller already set one.
    private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
            request
                .getHeaders()
                .put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
        }
    }

    // Feed-range filtering applies only to document/conflict feed reads and queries that carry a feed range.
    private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
        if (request.getResourceType() != ResourceType.Document &&
            request.getResourceType() != ResourceType.Conflict) {
            return false;
        }
        switch (request.getOperationType()) {
            case ReadFeed:
            case Query:
            case SqlQuery:
                return request.getFeedRange() != null;
            default:
                return false;
        }
    }

    // Adds the AAD bearer token when AAD auth is configured; key-based auth was handled in populateHeadersAsync.
    @Override
    public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
        if
(request == null) {
            throw new IllegalArgumentException("request");
        }
        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return request;
                });
        } else {
            return Mono.just(request);
        }
    }

    // HttpHeaders overload of the AAD authorization population above.
    @Override
    public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
        if (httpHeaders == null) {
            throw new IllegalArgumentException("httpHeaders");
        }
        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return httpHeaders;
                });
        }
        return Mono.just(httpHeaders);
    }

    @Override
    public AuthorizationTokenType getAuthorizationTokenType() {
        return this.authorizationTokenType;
    }

    // Resolves the auth token by precedence: custom resolver > credential-based signature >
    // single master key / resource token > per-resource token map.
    @Override
    public String getUserAuthorizationToken(String resourceName,
        ResourceType resourceType,
        RequestVerb requestVerb,
        Map<String, String> headers,
        AuthorizationTokenType tokenType,
        Map<String, Object> properties) {
        if (this.cosmosAuthorizationTokenResolver != null) {
            return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(),
                properties != null ?
                Collections.unmodifiableMap(properties) : null);
        } else if (credential != null) {
            return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers);
        } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
            return masterKeyOrResourceToken;
        } else {
            assert resourceTokensMap != null;
            if(resourceType.equals(ResourceType.DatabaseAccount)) {
                return this.firstResourceTokenFromPermissionFeed;
            }
            return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
        }
    }

    // Maps the service ResourceType to the public CosmosResourceType; unknown values fall back to SYSTEM.
    private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
        CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
        if (cosmosResourceType == null) {
            return CosmosResourceType.SYSTEM;
        }
        return cosmosResourceType;
    }

    // Records the session token from a response into the session container.
    void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
        this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
    }

    // Low-level POST for creates.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
        DocumentClientRetryPolicy documentClientRetryPolicy,
        OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }

    // Low-level upsert: a POST with the IS_UPSERT header; captures the session token from the response.
    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
        DocumentClientRetryPolicy documentClientRetryPolicy,
        OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                Map<String, String> headers = requestPopulated.getHeaders();
                assert (headers != null);
                headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                    .map(response -> {
                        this.captureSessionToken(requestPopulated, response);
                        return response;
                    }
                    );
            });
    }

    // Low-level PUT for replaces.
    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeadersAsync(request, RequestVerb.PUT)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    // Low-level PATCH.
    private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeadersAsync(request, RequestVerb.PATCH)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }
                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    // Public API: creates a document, wrapped with the availability (multi-region read) strategy.
    @Override
    public Mono<ResourceResponse<Document>> createDocument(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Create,
            (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
                collectionLink,
                document,
                opt,
                disableAutomaticIdGeneration,
                e2ecfg,
                clientCtxOverride),
            options,
            options != null &&
options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> createDocumentCore( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions(); if (nonNullRequestOptions.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal( collectionLink, document, nonNullRequestOptions, disableAutomaticIdGeneration, finalRetryPolicyInstance, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> createDocumentInternal( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy, DiagnosticsClientContext clientContextOverride) { try { logger.debug("Creating a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride); return requestObs .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout( RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, Mono<T> rxDocumentServiceResponseMono, ScopedDiagnosticsFactory scopedDiagnosticsFactory) { requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig); if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) { Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout(); if (endToEndTimeout.isNegative()) { CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(); if (latestCosmosDiagnosticsSnapshot == null) { scopedDiagnosticsFactory.createDiagnostics(); } return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout)); } return rxDocumentServiceResponseMono .timeout(endToEndTimeout) .onErrorMap(throwable -> getCancellationExceptionForPointOperations( scopedDiagnosticsFactory, throwable, requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook())); } return rxDocumentServiceResponseMono; } private static Throwable getCancellationExceptionForPointOperations( ScopedDiagnosticsFactory scopedDiagnosticsFactory, Throwable throwable, AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if 
(unwrappedException instanceof TimeoutException) { CosmosException exception = new OperationCancelledException(); exception.setStackTrace(throwable.getStackTrace()); Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get(); if (actualCallback != null) { logger.trace("Calling actual Mark E2E timeout callback"); actualCallback.run(); } CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(); if (lastDiagnosticsSnapshot == null) { scopedDiagnosticsFactory.createDiagnostics(); } BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics()); return exception; } return throwable; } private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) { checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null"); checkArgument( negativeTimeout.isNegative(), "This exception should only be used for negative timeouts"); String message = String.format("Negative timeout '%s' provided.", negativeTimeout); CosmosException exception = new OperationCancelledException(message, null); BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED); if (cosmosDiagnostics != null) { BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics); } return exception; } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Upsert, (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore( collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> upsertDocumentCore( String collectionLink, Object document, RequestOptions 
options,
        boolean disableAutomaticIdGeneration,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        // Downstream code dereferences the options freely, so never pass null onwards.
        RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        if (nonNullRequestOptions.getPartitionKey() == null) {
            // No explicit partition key: add the retry policy that refreshes the collection
            // cache and retries on partition-key-definition mismatch.
            requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
        }
        DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> upsertDocumentInternal(
                    collectionLink,
                    document,
                    nonNullRequestOptions,
                    disableAutomaticIdGeneration,
                    finalRetryPolicyInstance,
                    scopedDiagnosticsFactory),
                finalRetryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }

    private Mono<ResourceResponse<Document>> upsertDocumentInternal(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);

            Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride);

            return reqObs
                .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Replace,
            (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
                documentLink, document, opt, e2ecfg, clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    private Mono<ResourceResponse<Document>> replaceDocumentCore(
        String documentLink,
        Object document,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        if (nonNullRequestOptions.getPartitionKey() == null) {
            // Derive the collection link from the document link so the PK-mismatch retry policy
            // can refresh the right collection-cache entry.
            String collectionLink = Utils.getCollectionName(documentLink);
            requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
                collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
        }
        DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> replaceDocumentInternal(
                    documentLink,
                    document,
                    nonNullRequestOptions,
                    finalRequestRetryPolicy,
                    endToEndPolicyConfig,
                    scopedDiagnosticsFactory),
                requestRetryPolicy),
            scopedDiagnosticsFactory
        );
    }

    // Validates the arguments, converts the raw Object into a typed Document and delegates to
    // the Document-typed overload of replaceDocumentInternal.
    private Mono<ResourceResponse<Document>> replaceDocumentInternal(
        String documentLink,
        Object document,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }

            if (document == null) {
                throw new IllegalArgumentException("document");
            }

            Document typedDocument = Document.fromObject(document, options.getEffectiveItemSerializer());

            return this.replaceDocumentInternal(
                documentLink, typedDocument, options, retryPolicyInstance, clientContextOverride);
        } catch (Exception e) {
            logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Replace,
            (opt, e2ecfg, clientCtxOverride) ->
replaceDocumentCore( document, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> replaceDocumentCore( Document document, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( document, options, finalRequestRetryPolicy, endToEndPolicyConfig, clientContextOverride), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal( Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal( document.getSelfLink(), document, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); Consumer<Map<String, Object>> onAfterSerialization = null; if (options != null) { String trackingId = options.getTrackingId(); if (trackingId != null && !trackingId.isEmpty()) { onAfterSerialization = (node) -> node.put(Constants.Properties.TRACKING_ID, trackingId); } } ByteBuffer content = document.serializeJsonToByteBuffer(options.getEffectiveItemSerializer(), onAfterSerialization); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs);

        return requestObs
            // NOTE(review): the lambda parameter `req` is ignored and the outer `request` is used;
            // addPartitionKeyInformation appears to mutate the same request instance — confirm.
            .flatMap(req -> replace(request, retryPolicyInstance)
                .map(resp -> toResourceResponse(resp, Document.class)));
    }

    // Resolves the effective E2E latency policy for a request: per-request options win over the
    // client-level default.
    private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig(
        RequestOptions options,
        ResourceType resourceType,
        OperationType operationType) {

        return this.getEffectiveEndToEndOperationLatencyPolicyConfig(
            options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null,
            resourceType,
            operationType);
    }

    // Client-level default E2E policy only applies to Document operations, and for non-point
    // operations only when not disabled via configuration.
    private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig(
        CosmosEndToEndOperationLatencyPolicyConfig policyConfig,
        ResourceType resourceType,
        OperationType operationType) {

        if (policyConfig != null) {
            return policyConfig;
        }

        if (resourceType != ResourceType.Document) {
            return null;
        }

        if (!operationType.isPointOperation() && Configs.isDefaultE2ETimeoutDisabledForNonPointOperations()) {
            return null;
        }

        return this.cosmosEndToEndOperationLatencyPolicyConfig;
    }

    @Override
    public Mono<ResourceResponse<Document>> patchDocument(String documentLink,
                                                          CosmosPatchOperations cosmosPatchOperations,
                                                          RequestOptions options) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Patch,
            (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore(
                documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    private Mono<ResourceResponse<Document>> patchDocumentCore(
        String documentLink,
        CosmosPatchOperations cosmosPatchOperations,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        RequestOptions nonNullRequestOptions =
options != null ? options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> patchDocumentInternal(
                    documentLink,
                    cosmosPatchOperations,
                    nonNullRequestOptions,
                    documentClientRetryPolicy,
                    scopedDiagnosticsFactory),
                documentClientRetryPolicy),
            scopedDiagnosticsFactory
        );
    }

    /**
     * Builds and issues the Patch request: serializes the patch operations, records
     * serialization diagnostics, wires E2E-timeout cancellation and excluded regions, and
     * resolves the collection for partition-key handling.
     */
    private Mono<ResourceResponse<Document>> patchDocumentInternal(
        String documentLink,
        CosmosPatchOperations cosmosPatchOperations,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {

        checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
        checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

        logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

        final String path = Utils.joinPath(documentLink, null);

        final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
        Instant serializationStartTimeUTC = Instant.now();

        ByteBuffer content = ByteBuffer.wrap(
            PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

        Instant serializationEndTime = Instant.now();

        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTime,
                SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

        // NOTE(review): uses clientContextOverride directly, while the replace path wraps it in
        // getEffectiveClientContext(...) — confirm the difference is intentional.
        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            clientContextOverride,
            OperationType.Patch,
            ResourceType.Document,
            path,
            requestHeaders,
            options,
            content);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext =
            BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(
                BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        // Patch carries no document body for PK extraction, hence the null content/object here.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, null, options, collectionObs);

        return requestObs
            .flatMap(req -> patch(request,
retryPolicyInstance))
            .map(resp -> toResourceResponse(resp, Document.class));
    }

    @Override
    public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Delete,
            (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
                documentLink, null, opt, e2ecfg, clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    // Overload that also carries the document payload; used for partition-key extraction when
    // no explicit partition key is supplied in the options.
    @Override
    public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Delete,
            (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore(
                documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    private Mono<ResourceResponse<Document>> deleteDocumentCore(
        String documentLink,
        InternalObjectNode internalObjectNode,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> deleteDocumentInternal(
                    documentLink,
                    internalObjectNode,
                    nonNullRequestOptions,
                    requestRetryPolicy,
                    scopedDiagnosticsFactory),
                requestRetryPolicy),
            scopedDiagnosticsFactory
        );
    }

    /**
     * Builds and issues the Delete request for a document, wiring E2E-timeout cancellation,
     * excluded regions and partition-key resolution (possibly from the supplied payload).
     */
    private Mono<ResourceResponse<Document>> deleteDocumentInternal(
        String documentLink,
        InternalObjectNode internalObjectNode,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }

            logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Delete, ResourceType.Document, path, requestHeaders, options);

            if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
                request.setNonIdempotentWriteRetriesEnabled(true);
            }
            if (options != null) {
                options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                    () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
                request, null, internalObjectNode, options, collectionObs);
            return requestObs
                .flatMap(req -> this
                    .delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

        } catch (Exception e) {
            logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
        // NOTE(review): the partitionKey parameter is not referenced in the visible code path —
        // presumably the key is carried via the request options/headers; confirm.
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // Issues the server-side "delete all items by partition key" operation against a collection.
    private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                      DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }

            logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);
            return requestObs.flatMap(req -> this
                .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
        return readDocument(documentLink, options, this);
    }

    // Internal overload allowing a caller (e.g. readMany point reads) to supply its own
    // diagnostics factory instead of the client itself.
    private Mono<ResourceResponse<Document>> readDocument(
        String documentLink,
        RequestOptions options,
        DiagnosticsClientContext innerDiagnosticsFactory) {

        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Read,
            (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
            options,
            false,
            innerDiagnosticsFactory
        );
    }

    private Mono<ResourceResponse<Document>> readDocumentCore(
        String documentLink,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        RequestOptions nonNullRequestOptions = options != null ?
options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> readDocumentInternal(
                    documentLink,
                    nonNullRequestOptions,
                    retryPolicyInstance,
                    scopedDiagnosticsFactory),
                retryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }

    // Builds and issues the Read request for a document. `options` is non-null here because
    // readDocumentCore always passes a nonNullRequestOptions instance.
    private Mono<ResourceResponse<Document>> readDocumentInternal(
        String documentLink,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }

            logger.debug("Reading a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Read, ResourceType.Document, path, requestHeaders, options);

            // Wire the E2E-timeout cancellation hook and region exclusions into the request context.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
                this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

            return requestObs.flatMap(req -> this.read(request,
retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in reading a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    @Override
    public <T> Flux<FeedResponse<T>> readDocuments(
        String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        // A full read is implemented as an unfiltered query over the collection.
        return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
    }

    /**
     * Reads many items by (id, partition key): buckets the identities by partition key range,
     * issues efficient point reads for single-item ranges and queries for multi-item ranges,
     * then merges the results into a single aggregated FeedResponse with combined diagnostics,
     * query metrics and request charge.
     */
    @Override
    public <T> Mono<FeedResponse<T>> readMany(
        List<CosmosItemIdentity> itemIdentityList,
        String collectionLink,
        QueryFeedOperationState state,
        Class<T> klass) {

        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx)
        );

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
            OperationType.Query,
            ResourceType.Document,
            collectionLink, null
        );

        // Resolve the collection first; its partition key definition and routing map drive the
        // bucketing of item identities below.
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request);

        return collectionObs
            .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    return Mono.error(new IllegalStateException("Collection cannot be null"));
                }

                final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();

                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null);
                return valueHolderMono
                    .flatMap(collectionRoutingMapValueHolder -> {
                        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                        CollectionRoutingMap routingMap =
collectionRoutingMapValueHolder.v;
                        if (routingMap == null) {
                            return Mono.error(new IllegalStateException("Failed to get routing map."));
                        }

                        // Bucket each requested identity by the partition key range that owns its
                        // effective partition key.
                        itemIdentityList
                            .forEach(itemIdentity -> {
                                // For sub-partitioned (MULTI_HASH) containers every PK component
                                // must be supplied, otherwise the EPK cannot be computed.
                                if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                    ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                        .getComponents().size() != pkDefinition.getPaths().size()) {
                                    throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                                }
                                String effectivePartitionKeyString = PartitionKeyInternalHelper
                                    .getEffectivePartitionKeyString(
                                        BridgeInternal.getPartitionKeyInternal(
                                            itemIdentity.getPartitionKey()),
                                        pkDefinition);

                                PartitionKeyRange range =
                                    routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                                if (partitionRangeItemKeyMap.get(range) == null) {
                                    List<CosmosItemIdentity> list = new ArrayList<>();
                                    list.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, list);
                                } else {
                                    List<CosmosItemIdentity> pairs =
                                        partitionRangeItemKeyMap.get(range);
                                    pairs.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, pairs);
                                }
                            });

                        // Ranges with multiple items become per-range queries; single-item ranges
                        // are served by cheaper point reads (see pointReadsForReadMany).
                        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap =
                            getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());

                        Flux<FeedResponse<T>> pointReads = pointReadsForReadMany(
                            diagnosticsFactory,
                            partitionRangeItemKeyMap,
                            resourceLink,
                            state.getQueryOptions(),
                            klass);

                        Flux<FeedResponse<T>> queries = queryForReadMany(
                            diagnosticsFactory,
                            resourceLink,
                            new SqlQuerySpec(DUMMY_SQL_QUERY),
                            state.getQueryOptions(),
                            klass,
                            ResourceType.Document,
                            collection,
                            Collections.unmodifiableMap(rangeQueryMap));

                        return Flux.merge(pointReads, queries)
                            .collectList()
                            .map(feedList -> {
                                // Aggregate all pages into one response: results, request charge,
                                // query metrics and client-side request statistics.
                                List<T> finalList = new ArrayList<>();
                                HashMap<String, String> headers = new HashMap<>();
                                ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                                Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection();
                                double requestCharge = 0;
                                for (FeedResponse<T> page :
feedList) {
                                    ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                        ModelBridgeInternal.queryMetrics(page);
                                    if (pageQueryMetrics != null) {
                                        pageQueryMetrics.forEach(
                                            aggregatedQueryMetrics::putIfAbsent);
                                    }

                                    requestCharge += page.getRequestCharge();
                                    finalList.addAll(page.getResults());
                                    aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                                }

                                CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                    aggregatedDiagnostics, aggregateRequestStatistics);

                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    // Record the merged operation as a single 200 with the
                                    // combined item count and request charge.
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        200,
                                        0,
                                        finalList.size(),
                                        requestCharge,
                                        aggregatedDiagnostics,
                                        null
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            aggregatedDiagnostics,
                                            ctx);
                                }

                                headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                    .toString(requestCharge));
                                FeedResponse<T> frp = BridgeInternal
                                    .createFeedResponseWithQueryMetrics(
                                        finalList,
                                        headers,
                                        aggregatedQueryMetrics,
                                        null,
                                        false,
                                        false,
                                        aggregatedDiagnostics);
                                return frp;
                            });
                    })
                    .onErrorMap(throwable -> {
                        // On failure, still merge diagnostics and record the failed operation in
                        // the diagnostics context before propagating the CosmosException.
                        if (throwable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException)throwable;
                            CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                            if (diagnostics != null) {
                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        cosmosException.getStatusCode(),
                                        cosmosException.getSubStatusCode(),
                                        0,
                                        cosmosException.getRequestCharge(),
                                        diagnostics,
                                        throwable
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            diagnostics,
                                            state.getDiagnosticsContextSnapshot());
                                }
                            }

                            return cosmosException;
                        }

                        return throwable;
                    });
                }
            );
    }

    // Builds one SQL query per partition key range that owns MORE than one requested identity;
    // single-item ranges are excluded here (they are served via point reads instead).
    private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
PartitionKeyDefinition partitionKeyDefinition) {

        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
        List<String> partitionKeySelectors = createPkSelectors(partitionKeyDefinition);

        for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {

            SqlQuerySpec sqlQuerySpec;
            List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue();
            if (cosmosItemIdentityList.size() > 1) {
                // When the container is partitioned on /id, the PK equals the id and a simple
                // "id IN (...)" query suffices; otherwise each (id, pk) pair is matched explicitly.
                if (partitionKeySelectors.size() == 1 && partitionKeySelectors.get(0).equals("[\"id\"]")) {
                    sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(cosmosItemIdentityList);
                } else {
                    sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelectors);
                }
                rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
            }
        }

        return rangeQueryMap;
    }

    // Parameterized "SELECT * FROM c WHERE c.id IN (@param0, @param1, ...)" for containers
    // partitioned on /id.
    private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<CosmosItemIdentity> idPartitionKeyPairList) {

        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
        for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
            CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + i;

            parameters.add(new SqlParameter(idParamName, idValue));
            queryStringBuilder.append(idParamName);

            if (i < idPartitionKeyPairList.size() - 1) {
                queryStringBuilder.append(", ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    // Parameterized disjunction of (c.id = @x AND c[pkPath] = @y ...) clauses, one per identity.
    private SqlQuerySpec createReadManyQuerySpec(
        List<CosmosItemIdentity> itemIdentities,
        List<String> partitionKeySelectors) {

        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE ( ");

        int paramCount = 0;
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);

            PartitionKey pkValueAsPartitionKey =
itemIdentity.getPartitionKey();
            // One query parameter per partition key component (multi-hash PKs have several).
            Object[] pkValues = ModelBridgeInternal.getPartitionKeyInternal(pkValueAsPartitionKey).toObjectArray();
            List<List<String>> partitionKeyParams = new ArrayList<>();
            int pathCount = 0;
            for (Object pkComponentValue : pkValues) {
                String pkParamName = "@param" + paramCount;
                partitionKeyParams.add(Arrays.asList(partitionKeySelectors.get(pathCount), pkParamName));
                parameters.add(new SqlParameter(pkParamName, pkComponentValue));

                paramCount++;
                pathCount++;
            }

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + paramCount;
            paramCount++;
            parameters.add(new SqlParameter(idParamName, idValue));

            // Emit: ( c.id = @idParam AND c["pkPath"] = @pkParam ... )
            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);

            for (List<String> pkParam: partitionKeyParams) {
                queryStringBuilder.append(" AND ");
                queryStringBuilder.append(" c");
                queryStringBuilder.append(pkParam.get(0));
                queryStringBuilder.append((" = "));
                queryStringBuilder.append(pkParam.get(1));
            }

            queryStringBuilder.append(" )");

            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    // Turns PK definition paths (e.g. "/id") into bracketed selectors (e.g. ["id"]).
    // NOTE(review): replacing '"' with '\' (not '\"') looks suspicious for PK paths containing
    // quotes — confirm against the service's path-escaping rules.
    private List<String> createPkSelectors(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            .map(pathPart -> StringUtils.substring(pathPart, 1)) // skip starting "/"
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.toList());
    }

    /**
     * Runs the per-partition-key-range queries produced for readMany; applies the effective E2E
     * timeout to the resulting Flux when an E2E policy is enabled.
     */
    private <T> Flux<FeedResponse<T>> queryForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DocumentCollection collection,
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

        if (rangeQueryMap.isEmpty()) {
            return Flux.empty();
        }

        UUID activityId = randomUuid();

        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
    // (continuation of queryForReadMany from the previous chunk)
    IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

    Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
        DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
            diagnosticsFactory,
            queryClient,
            collection.getResourceId(),
            sqlQuery,
            rangeQueryMap,
            options,
            collection.getResourceId(),
            parentResourceLink,
            activityId,
            klass,
            resourceTypeEnum,
            isQueryCancelledOnTimeout);

    Flux<FeedResponse<T>> feedResponseFlux =
        executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

    RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
        .CosmosQueryRequestOptionsHelper
        .getCosmosQueryRequestOptionsAccessor()
        .toRequestOptions(options);

    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
        getEndToEndOperationLatencyPolicyConfig(requestOptions, ResourceType.Document, OperationType.Query);

    // Only wrap the pipeline with a client-side timeout when an end-to-end latency
    // policy is configured and enabled.
    if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
        return getFeedResponseFluxWithTimeout(
            feedResponseFlux,
            endToEndPolicyConfig,
            options,
            isQueryCancelledOnTimeout,
            diagnosticsFactory);
    }

    return feedResponseFlux;
}

/**
 * Executes the "point read" arm of readMany: every partition-key range that requested
 * exactly one item is served with a single document read instead of a query. A plain
 * 404 (substatus UNKNOWN, i.e. "item not found") is tolerated and converted into an
 * empty feed page that still carries the failure diagnostics; any other error fails
 * the stream. Each successful read is wrapped into a one-item FeedResponse with the
 * read's diagnostics attached.
 */
private <T> Flux<FeedResponse<T>> pointReadsForReadMany(
    ScopedDiagnosticsFactory diagnosticsFactory,
    Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
    String resourceLink,
    CosmosQueryRequestOptions queryRequestOptions,
    Class<T> klass) {

    CosmosItemSerializer effectiveItemSerializer = getEffectiveItemSerializer(queryRequestOptions);
    return Flux.fromIterable(singleItemPartitionRequestMap.values())
        .flatMap(cosmosItemIdentityList -> {
            // Lists with more than one entry are handled by the query arm, not here.
            if (cosmosItemIdentityList.size() == 1) {
                CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                RequestOptions requestOptions = ImplementationBridgeHelpers
                    .CosmosQueryRequestOptionsHelper
                    .getCosmosQueryRequestOptionsAccessor()
                    .toRequestOptions(queryRequestOptions);
                requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                return this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                    .flatMap(resourceResponse -> Mono.just(
                        new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                    ))
                    .onErrorResume(throwable -> {
                        Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                        if (unwrappedThrowable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                            int statusCode = cosmosException.getStatusCode();
                            int subStatusCode = cosmosException.getSubStatusCode();
                            // 404/UNKNOWN == item simply does not exist: swallow it into
                            // the (null, exception) pair instead of failing readMany.
                            if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                            }
                        }
                        return Mono.error(unwrappedThrowable);
                    });
            }
            return Mono.empty();
        })
        .flatMap(resourceResponseToExceptionPair -> {
            ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
            CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
            FeedResponse<T> feedResponse;

            if (cosmosException != null) {
                // Not-found: emit an empty page but preserve the request diagnostics.
                feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
            } else {
                CosmosItemResponse<T> cosmosItemResponse =
                    itemResponseAccessor.createCosmosItemResponse(resourceResponse, klass, effectiveItemSerializer);
                feedResponse = ModelBridgeInternal.createFeedResponse(
                    Arrays.asList(cosmosItemResponse.getItem()),
                    cosmosItemResponse.getResponseHeaders());
                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                    feedResponse.getCosmosDiagnostics(),
                    Collections.singleton(
                        BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
            }

            return Mono.just(feedResponse);
        });
}

// (signature continues into the next chunk)
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    // (continuation of queryDocuments(String, String, ...) from the previous chunk)
    // Convenience overload: wraps the raw query string into a SqlQuerySpec.
    String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
    return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
}

/**
 * Resolves the item serializer to use: a request-level serializer wins, then the
 * client-wide custom serializer, otherwise the SDK default.
 */
@Override
public CosmosItemSerializer getEffectiveItemSerializer(CosmosItemSerializer requestOptionsItemSerializer) {
    if (requestOptionsItemSerializer != null) {
        return requestOptionsItemSerializer;
    }

    if (this.defaultCustomSerializer != null) {
        return this.defaultCustomSerializer;
    }

    return CosmosItemSerializer.DEFAULT_SERIALIZER;
}

// Convenience overload extracting the custom serializer (if any) from query options.
// NOTE(review): the <T> type parameter on these two private overloads is unused.
private <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosQueryRequestOptions queryRequestOptions) {
    CosmosItemSerializer requestOptionsItemSerializer =
        queryRequestOptions != null ? queryRequestOptions.getCustomItemSerializer() : null;

    return this.getEffectiveItemSerializer(requestOptionsItemSerializer);
}

// Convenience overload extracting the custom serializer (if any) from item options.
private <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosItemRequestOptions itemRequestOptions) {
    CosmosItemSerializer requestOptionsItemSerializer =
        itemRequestOptions != null ? itemRequestOptions.getCustomItemSerializer() : null;

    return this.getEffectiveItemSerializer(requestOptionsItemSerializer);
}

/**
 * Adapts this client to the IDocumentQueryClient interface consumed by the query
 * pipeline. When an operation context/listener tuple is supplied, query execution
 * stamps the correlated-activity-id header on each request and invokes the
 * request/response/exception listener callbacks around it.
 */
private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {

    return new IDocumentQueryClient () {

        @Override
        public RxCollectionCache getCollectionCache() {
            return RxDocumentClientImpl.this.collectionCache;
        }

        @Override
        public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
            return RxDocumentClientImpl.this.partitionKeyRangeCache;
        }

        @Override
        public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
            return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
        }

        @Override
        public ConsistencyLevel getDefaultConsistencyLevelAsync() {
            // account-level default consistency from the gateway configuration
            return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
        }

        @Override
        public ConsistencyLevel getDesiredConsistencyLevelAsync() {
            // client-configured consistency override (may be null per client setup)
            return RxDocumentClientImpl.this.consistencyLevel;
        }

        @Override
        public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
            if (operationContextAndListenerTuple == null) {
                return RxDocumentClientImpl.this.query(request).single();
            } else {
                final OperationListener listener = operationContextAndListenerTuple.getOperationListener();
                final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                listener.requestListener(operationContext, request);

                return RxDocumentClientImpl.this.query(request).single().doOnNext(
                    response -> listener.responseListener(operationContext, response)
                ).doOnError(
                    ex -> listener.exceptionListener(operationContext, ex)
                );
            }
        }

        @Override
        public QueryCompatibilityMode getQueryCompatibilityMode() {
            return QueryCompatibilityMode.Default;
        }

        // (override continues into the next chunk)
        @Override
        public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
            ResourceType resourceType,
            // (continuation of executeFeedOperationWithAvailabilityStrategy from the previous chunk —
            // simply delegates to the enclosing client)
            OperationType operationType,
            Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
            RxDocumentServiceRequest req,
            BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {

            return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                resourceType,
                operationType,
                retryPolicyFactory,
                req,
                feedOperation
            );
        }

        @Override
        public <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosQueryRequestOptions queryRequestOptions) {
            return RxDocumentClientImpl.this.getEffectiveItemSerializer(queryRequestOptions);
        }

        @Override
        public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
            // not implemented for this adapter — callers in the query pipeline do not use it
            return null;
        }
    };
}

/** Queries documents with a pre-built SqlQuerySpec; logs the query before executing. */
@Override
public <T> Flux<FeedResponse<T>> queryDocuments(
    String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state, Class<T> classOfT) {
    SqlQuerySpecLogger.getInstance().logQuery(querySpec);
    return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
}

/** Runs a change feed query against the given collection. */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
    final DocumentCollection collection,
    final CosmosChangeFeedRequestOptions changeFeedOptions,
    Class<T> classOfT) {

    checkNotNull(collection, "Argument 'collection' must not be null.");

    ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
        this,
        ResourceType.Document,
        classOfT,
        collection.getAltLink(),
        collection.getResourceId(),
        changeFeedOptions);

    return changeFeedQueryImpl.executeAsync();
}

/** PagedFlux adapter: extracts the change feed options from the operation state. */
@Override
public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
    return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
}

/**
 * Reads all documents of a single logical partition by issuing a partition-scoped scan
 * query against the physical partition that owns the partition key. Wires up scoped
 * diagnostics (with reset/merge hooks when a cross-region availability strategy with
 * two or more applicable regions is in play) and retries collection resolution on
 * "invalid partition" errors (e.g. after a split).
 */
@Override
public <T> Flux<FeedResponse<T>> readAllDocuments(
    String collectionLink,
    PartitionKey partitionKey,
    QueryFeedOperationState state,
    Class<T> classOfT) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    if (partitionKey == null) {
        throw new IllegalArgumentException("partitionKey");
    }

    // Clone so mutations (e.g. partition key range id) do not leak into caller state.
    final CosmosQueryRequestOptions effectiveOptions = qryOptAccessor.clone(state.getQueryOptions());

    RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
    CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();

    List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
        endToEndPolicyConfig,
        ResourceType.Document,
        OperationType.Query,
        false,
        nonNullRequestOptions);

    ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);

    if (orderedApplicableRegionsForSpeculation.size() < 2) {
        // single region: no speculative retries, so no reset hook is needed
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx));
    } else {
        state.registerDiagnosticsFactory(
            () -> diagnosticsFactory.reset(),
            (ctx) -> diagnosticsFactory.merge(ctx));
    }

    RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
        diagnosticsFactory,
        OperationType.Query,
        ResourceType.Document,
        collectionLink,
        null
    );

    Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
        collectionCache.resolveCollectionAsync(null, request).flux();

    return collectionObs.flatMap(documentCollectionResourceResponse -> {
        DocumentCollection collection = documentCollectionResourceResponse.v;
        if (collection == null) {
            return Mono.error(new IllegalStateException("Collection cannot be null"));
        }

        PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
        List<String> partitionKeySelectors = createPkSelectors(pkDefinition);
        SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, partitionKeySelectors);

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        UUID activityId = randomUuid();

        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(state.getQueryOptions()));

        // Re-resolve the collection and retry if the targeted partition went away
        // (e.g. collection recreated or partition split).
        InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
            this.collectionCache,
            null,
            resourceLink,
            ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

        Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
            () -> {
                Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                    .tryLookupAsync(
                        BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null).flux();

                return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                    CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                    if (routingMap == null) {
                        return Mono.error(new IllegalStateException("Failed to get routing map."));
                    }

                    // Map the logical partition key onto the physical range that owns it.
                    String effectivePartitionKeyString = PartitionKeyInternalHelper
                        .getEffectivePartitionKeyString(
                            BridgeInternal.getPartitionKeyInternal(partitionKey),
                            pkDefinition);

                    PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                    return createQueryInternal(
                        diagnosticsFactory,
                        resourceLink,
                        querySpec,
                        ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                        classOfT,
                        ResourceType.Document,
                        queryClient,
                        activityId,
                        isQueryCancelledOnTimeout);
                });
            },
            invalidPartitionExceptionRetryPolicy);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            return innerFlux;
        }

        // Multi-region speculation: fold diagnostics back into the request options on
        // every outcome (success, error, cancellation).
        return innerFlux
            .flatMap(result -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return Mono.just(result);
            })
            .onErrorMap(throwable -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                return throwable;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    });
}

@Override
public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
    return queryPlanCache;
}

// (signature continues into the next chunk)
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String
    // (continuation of readPartitionKeyRanges(String, QueryFeedOperationState) from the previous chunk)
    collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}

/** Overload taking raw query options instead of a feed-operation state. */
@Override
public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
        Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
}

/**
 * Validates arguments and builds the service request for a stored procedure operation
 * (create/upsert/...) under the given collection.
 */
private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                           RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (storedProcedure == null) {
        throw new IllegalArgumentException("storedProcedure");
    }

    validateResource(storedProcedure);

    String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
}

/**
 * Validates arguments and builds the service request for a user-defined-function
 * operation under the given collection.
 */
private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                               RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (udf == null) {
        throw new IllegalArgumentException("udf");
    }

    validateResource(udf);

    String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
    Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, operationType);
    return RxDocumentServiceRequest.create(this, operationType,
        ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
}

/** Creates a stored procedure; retries are driven by the session-token-reset policy. */
@Override
public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                     StoredProcedure storedProcedure, RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}

// Builds and issues the create request; synchronous failures are turned into Mono.error.
private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                              StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
            collectionLink, storedProcedure.getId());
        RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
            OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Replaces a stored procedure by its self-link. */
@Override
public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                      RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy);
}

// (signature continues into the next chunk)
private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure,
                                                                               RequestOptions options,
    // (continuation of replaceStoredProcedureInternal from the previous chunk)
    DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }
        logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

        RxDocumentClientImpl.validateResource(storedProcedure);

        String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes a stored procedure by link. */
@Override
public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                     RequestOptions options) {
    DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy);
}

// Builds and issues the delete request; synchronous failures become Mono.error.
private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink,
                                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Deleting a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete,
            ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads a stored procedure by link. */
@Override
public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink,
                                                                   RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and issues the read request; synchronous failures become Mono.error.
private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink,
                                                                            RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        if (StringUtils.isEmpty(storedProcedureLink)) {
            throw new IllegalArgumentException("storedProcedureLink");
        }

        logger.debug("Reading a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read,
            ResourceType.StoredProcedure, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class));

    } catch (Exception e) {
        logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the feed of all stored procedures under a collection. */
@Override
public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink,
                                                                QueryFeedOperationState state) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class,
        Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT));
}

/** Queries stored procedures with a raw query string. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query,
                                                                 QueryFeedOperationState state) {
    return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state);
}

/** Queries stored procedures with a pre-built SqlQuerySpec. */
@Override
public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink,
                                                                 SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure);
}

// (call continues into the next chunk)
/** Executes a stored procedure with the given parameters. */
@Override
public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink,
                                                            RequestOptions options, List<Object> procedureParams) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams,
        // (continuation of executeStoredProcedure from the previous chunk)
        documentClientRetryPolicy), documentClientRetryPolicy);
}

/** Executes a transactional batch request against a collection. */
@Override
public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                     ServerBatchRequest serverBatchRequest,
                                                     RequestOptions options,
                                                     boolean disableAutomaticIdGeneration) {
    DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy);
}

/**
 * Builds and issues the ExecuteJavaScript request for a stored procedure: serializes
 * the parameters into the request body, resolves partition key routing info, then
 * captures the session token from the response.
 */
private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                     RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) {

    try {
        logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
        String path = Utils.joinPath(storedProcedureLink, null);

        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript);
        requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

        // Empty parameter list is sent as an empty body, not "[]".
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.ExecuteJavaScript,
            ResourceType.StoredProcedure, path,
            procedureParams != null && !procedureParams.isEmpty() ? RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "",
            requestHeaders, options);

        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicy != null) {
            retryPolicy.onBeforeSendRequest(request);
        }

        Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
        // NOTE(review): the lambda ignores 'req' and reuses the outer 'request' —
        // presumably addPartitionKeyInformation mutates and returns the same instance;
        // confirm before relying on it.
        return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options))
            .map(response -> {
                this.captureSessionToken(request, response);
                return toStoredProcedureResponse(response);
            }));

    } catch (Exception e) {
        logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Builds the batch request, sends it, and parses the service response into a
// CosmosBatchResponse; synchronous failures become Mono.error.
private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                              ServerBatchRequest serverBatchRequest,
                                                              RequestOptions options,
                                                              DocumentClientRetryPolicy requestRetryPolicy,
                                                              boolean disableAutomaticIdGeneration) {

    try {
        logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());

        Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
        Mono<RxDocumentServiceResponse> responseObservable =
            requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));

        return responseObservable
            .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));

    } catch (Exception ex) {
        logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
        return Mono.error(ex);
    }
}

// (call continues into the next chunk)
/** Creates a trigger under the given collection. */
@Override
public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger,
                                                     RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink,
        // (continuation of createTrigger from the previous chunk)
        trigger, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and issues the trigger create request; synchronous failures become Mono.error.
private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger,
                                                              RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    try {

        logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]",
            collectionLink, trigger.getId());
        RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options,
            OperationType.Create);
        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Validates arguments and builds the service request for a trigger operation. */
private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options,
                                                   OperationType operationType) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    if (trigger == null) {
        throw new IllegalArgumentException("trigger");
    }

    RxDocumentClientImpl.validateResource(trigger);

    String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path,
        trigger, requestHeaders, options);
}

/** Replaces a trigger by its self-link. */
@Override
public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and issues the trigger replace request; synchronous failures become Mono.error.
private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options,
                                                               DocumentClientRetryPolicy retryPolicyInstance) {

    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }

        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);

        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace,
            ResourceType.Trigger, path, trigger, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes a trigger by link. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and issues the trigger delete request; synchronous failures become Mono.error.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }

        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete,
            ResourceType.Trigger, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads a trigger by link. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and issues the trigger read request; synchronous failures become Mono.error.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }

        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);
        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read,
            ResourceType.Trigger, path, requestHeaders, options);

        if (retryPolicyInstance != null){
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));

    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the feed of all triggers under a collection. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {

    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }

    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class,
        Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}

/** Queries triggers with a raw query string. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query,
                                                 QueryFeedOperationState state) {
    return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}

/** Queries triggers with a pre-built SqlQuerySpec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec,
                                                 QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}

/** Creates a user-defined function under the given collection. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink,
                                                                             UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}

// (signature continues into the next chunk)
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId());
            RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Replaces an existing user defined function (resource is validated first). */
    @Override
    public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options,
                                                                                          DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (udf == null) {
                throw new IllegalArgumentException("udf");
            }
            logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
            validateResource(udf);
            // Replace targets the resource's self link rather than a caller-supplied link.
            String path = Utils.joinPath(udf.getSelfLink(), null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Deletes the user defined function identified by {@code udfLink}. */
    @Override
    public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options,
                                                                                          DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(udfLink)) {
                throw new IllegalArgumentException("udfLink");
            }
            logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);
            String path = Utils.joinPath(udfLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads the user defined function identified by {@code udfLink}. */
    @Override
    public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options,
                                                                                        DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(udfLink)) {
                throw new IllegalArgumentException("udfLink");
            }
            logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);
            String path = Utils.joinPath(udfLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Reads all UDFs of a collection as a paged feed.
    @Override
    public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, QueryFeedOperationState state) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
            Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
    }

    @Override
    public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
        String collectionLink, String query, QueryFeedOperationState state) {
        return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
        String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
    }

    /** Reads the conflict identified by {@code conflictLink}. */
    @Override
    public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, QueryFeedOperationState state) { return queryConflicts(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { 
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options,
                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(conflictLink)) {
                throw new IllegalArgumentException("conflictLink");
            }
            logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink);
            String path = Utils.joinPath(conflictLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options);
            // Conflicts are partitioned resources, so partition-key info is resolved before sending.
            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            return reqObs.flatMap(req -> {
                // NOTE(review): the lambda ignores `req` and keeps using `request` — presumably
                // addPartitionKeyInformation mutates and returns the same instance; confirm.
                if (retryPolicyInstance != null) {
                    retryPolicyInstance.onBeforeSendRequest(request);
                }
                return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                    .map(response -> toResourceResponse(response, Conflict.class));
            });
        } catch (Exception e) {
            logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Creates a user in the given database. */
    @Override
    public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy);
    }

    // NOTE(review): unlike the upsert path, this does not call onBeforeSendRequest before issuing
    // the request — confirm whether create() handles that itself.
    private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options,
                                                            DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Upserts a user in the given database. */
    @Override
    public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            logger.debug("Upserting a User. databaseLink [{}], user id [{}]", databaseLink, user.getId());
            RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Validates inputs and builds the RxDocumentServiceRequest shared by user create/upsert.
    private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options,
                                                    OperationType operationType) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (user == null) {
            throw new IllegalArgumentException("user");
        }
        RxDocumentClientImpl.validateResource(user);
        String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType);
        return RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options);
    }

    /** Replaces an existing user (targets the resource's self link). */
    @Override
    public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options,
                                                             DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (user == null) {
                throw new IllegalArgumentException("user");
            }
            logger.debug("Replacing a User. user id [{}]", user.getId());
            RxDocumentClientImpl.validateResource(user);
            String path = Utils.joinPath(user.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.User, path, user, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // NOTE(review): unlike the sibling CRUD methods this one carries no @Override — confirm it is
    // (or should be) declared on the implemented interface.
    public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(userLink)) {
                throw new IllegalArgumentException("userLink");
            }
            logger.debug("Deleting a User. userLink [{}]", userLink);
            String path = Utils.joinPath(userLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.User, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Reads the user identified by {@code userLink}. */
    @Override
    public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options,
                                                          DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(userLink)) {
                throw new IllegalArgumentException("userLink");
            }
            logger.debug("Reading a User. userLink [{}]", userLink);
            String path = Utils.joinPath(userLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.User, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Reads all users of a database as a paged feed.
    @Override
    public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        return nonDocumentReadFeed(state, ResourceType.User, User.class,
            Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT));
    }

    @Override
    public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) {
        return queryUsers(databaseLink, new SqlQuerySpec(query), state);
    }

    @Override
    public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
        return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User);
    }

    /** Reads the client encryption key identified by {@code clientEncryptionKeyLink}. */
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink,
                                                                                       RequestOptions options,
                                                                                       DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if
(StringUtils.isEmpty(clientEncryptionKeyLink)) {
                throw new IllegalArgumentException("clientEncryptionKeyLink");
            }
            logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);
            String path = Utils.joinPath(clientEncryptionKeyLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }
            return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /** Creates a client encryption key in the given database. */
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
                                                                                 ClientEncryptionKey clientEncryptionKey,
                                                                                 RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
    }

    // NOTE(review): mirrors createUserInternal — no onBeforeSendRequest call before issuing; confirm intended.
    private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink,
                                                                                          ClientEncryptionKey clientEncryptionKey,
                                                                                          RequestOptions options,
                                                                                          DocumentClientRetryPolicy documentClientRetryPolicy) {
        try {
            logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]",
                databaseLink, clientEncryptionKey.getId());
            RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
            return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, ClientEncryptionKey.class));
        } catch (Exception e) {
            logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Validates inputs and builds the request shared by client-encryption-key operations.
    private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey,
                                                                   RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(databaseLink)) {
            throw new IllegalArgumentException("databaseLink");
        }
        if (clientEncryptionKey == null) {
            throw new IllegalArgumentException("clientEncryptionKey");
        }
        RxDocumentClientImpl.validateResource(clientEncryptionKey);
        String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
        return RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path,
            clientEncryptionKey, requestHeaders, options);
    }

    /** Replaces a client encryption key addressed by its name-based link. */
    @Override
    public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
                                                                                  String nameBasedLink, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
    }

    private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey,
                                                                                          String nameBasedLink,
                                                                                          RequestOptions options,
                                                                                          DocumentClientRetryPolicy
retryPolicyInstance) { try { if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys( String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys( String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> 
createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null)); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, QueryFeedOperationState state) { return queryPermissions(userLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing 
an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( QueryFeedOperationState state, ResourceType resourceType, Class<T> klass, String resourceLink) { return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy), retryPolicy); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink, DocumentClientRetryPolicy retryPolicy) { final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions(); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions); int maxPageSize = maxItemCount != null ? 
maxItemCount : -1; assert(resourceType != ResourceType.Document); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> readFeed(request) .map(response -> feedResponseAccessor.createFeedResponse( response, CosmosItemSerializer.DEFAULT_SERIALIZER, klass)); return Paginator .getPaginatedQueryResultAsObservable( nonNullOptions, createRequestFunc, executeFunc, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) { return queryOffers(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, 
documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public ISessionContainer getSession() { return this.sessionContainer; } public void setSession(ISessionContainer sessionContainer) { this.sessionContainer = sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } @Override public GlobalEndpointManager getGlobalEndpointManager() { return this.globalEndpointManager; } @Override public AddressSelector getAddressSelector() { return new AddressSelector(this.addressResolver, this.configs.getProtocol()); } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. 
* * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.useGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", 
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) { this.storeModel.enableThroughputControl(throughputControlStore); } else { this.gatewayProxy.enableThroughputControl(throughputControlStore); } } this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono); } @Override public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) { return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig); } @Override public ConsistencyLevel getDefaultConsistencyLevelOfAccount() { return this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } /*** * Configure fault injector provider. * * @param injectorProvider the fault injector provider. 
*/ @Override public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) { checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null"); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) { this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs); this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs); } this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs); } @Override public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities); } @Override public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities); } @Override public String getMasterKeyOrResourceToken() { return this.masterKeyOrResourceToken; } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, List<String> partitionKeySelectors) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object[] pkValues = ModelBridgeInternal.getPartitionKeyInternal(partitionKey).toObjectArray(); String pkParamNamePrefix = "@pkValue"; for (int i = 0; i < pkValues.length; i++) { StringBuilder subQueryStringBuilder = new StringBuilder(); String sqlParameterName = pkParamNamePrefix + i; if (i > 0) { subQueryStringBuilder.append(" AND "); } subQueryStringBuilder.append(" c"); subQueryStringBuilder.append(partitionKeySelectors.get(i)); subQueryStringBuilder.append((" = ")); subQueryStringBuilder.append(sqlParameterName); parameters.add(new SqlParameter(sqlParameterName, pkValues[i])); queryStringBuilder.append(subQueryStringBuilder); } return new 
SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink, forceRefresh), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal( RxDocumentServiceRequest request, String collectionLink, boolean forceRefresh) { logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, forceRefresh, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> 
partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } /** * Creates a type 4 (pseudo randomly generated) UUID. * <p> * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator. * * @return A randomly generated {@link UUID}. */ public static UUID randomUuid() { return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); } static UUID randomUuid(long msb, long lsb) { msb &= 0xffffffffffff0fffL; msb |= 0x0000000000004000L; lsb &= 0x3fffffffffffffffL; lsb |= 0x8000000000000000L; return new UUID(msb, lsb); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled) { return wrapPointOperationWithAvailabilityStrategy( resourceType, operationType, callback, initialRequestOptions, idempotentWriteRetriesEnabled, this ); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled, DiagnosticsClientContext innerDiagnosticsFactory) { checkNotNull(resourceType, "Argument 'resourceType' must not be null."); checkNotNull(operationType, "Argument 'operationType' must not be null."); checkNotNull(callback, "Argument 'callback' must not be null."); final RequestOptions nonNullRequestOptions = initialRequestOptions != null ? 
initialRequestOptions : new RequestOptions(); checkArgument( resourceType == ResourceType.Document, "This method can only be used for document point operations."); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions, resourceType, operationType); List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, idempotentWriteRetriesEnabled, nonNullRequestOptions); if (orderedApplicableRegionsForSpeculation.size() < 2) { return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>(); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); orderedApplicableRegionsForSpeculation .forEach(region -> { RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions); if (monoList.isEmpty()) { Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions = callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory) .map(NonTransientPointOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientPointOperationResult( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedOptions.setExcludeRegions( getEffectiveExcludedRegionsForHedging( nonNullRequestOptions.getExcludeRegions(), orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono = 
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory) .map(NonTransientPointOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientPointOperationResult( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) .delaySubscription(delayForCrossRegionalRetry)); } else { monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { diagnosticsFactory.merge(nonNullRequestOptions); if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { CosmosException cosmosException = Utils.as(innerException, CosmosException.class); diagnosticsFactory.merge(nonNullRequestOptions); return cosmosException; } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( 
message, innerException ); } index++; } } diagnosticsFactory.merge(nonNullRequestOptions); return exception; }) .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions)); } private static boolean isCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); return unwrappedException instanceof CosmosException; } private static boolean isNonTransientCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); if (!(unwrappedException instanceof CosmosException)) { return false; } CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class); return isNonTransientResultForHedging( cosmosException.getStatusCode(), cosmosException.getSubStatusCode()); } private List<String> getEffectiveExcludedRegionsForHedging( List<String> initialExcludedRegions, List<String> applicableRegions, String currentRegion) { List<String> effectiveExcludedRegions = new ArrayList<>(); if (initialExcludedRegions != null) { effectiveExcludedRegions.addAll(initialExcludedRegions); } for (String applicableRegion: applicableRegions) { if (!applicableRegion.equals(currentRegion)) { effectiveExcludedRegions.add(applicableRegion); } } return effectiveExcludedRegions; } private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) { if (statusCode < HttpConstants.StatusCodes.BADREQUEST) { return true; } if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) { return true; } if (statusCode == HttpConstants.StatusCodes.BADREQUEST || statusCode == HttpConstants.StatusCodes.CONFLICT || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) { return true; } if (statusCode == HttpConstants.StatusCodes.NOTFOUND && 
subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) { return true; } return false; } private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) { if (clientContextOverride != null) { return clientContextOverride; } return this; } /** * Returns the applicable endpoints ordered by preference list if any * @param operationType - the operationT * @return the applicable endpoints ordered by preference list if any */ private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) { if (operationType.isReadOnlyOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions)); } else if (operationType.isWriteOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions)); } return EMPTY_ENDPOINT_LIST; } private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) { if (orderedEffectiveEndpointsList == null) { return EMPTY_ENDPOINT_LIST; } int i = 0; while (i < orderedEffectiveEndpointsList.size()) { if (orderedEffectiveEndpointsList.get(i) == null) { orderedEffectiveEndpointsList.remove(i); } else { i++; } } return orderedEffectiveEndpointsList; } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, RequestOptions options) { return getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, isIdempotentWriteRetriesEnabled, options.getExcludeRegions()); } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, List<String> excludedRegions) { if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) { return EMPTY_REGION_LIST; 
} if (resourceType != ResourceType.Document) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) { return EMPTY_REGION_LIST; } if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) { return EMPTY_REGION_LIST; } List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions); HashSet<String> normalizedExcludedRegions = new HashSet<>(); if (excludedRegions != null) { excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT))); } List<String> orderedRegionsForSpeculation = new ArrayList<>(); endpoints.forEach(uri -> { String regionName = this.globalEndpointManager.getRegionName(uri, operationType); if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) { orderedRegionsForSpeculation.add(regionName); } }); return orderedRegionsForSpeculation; } private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( final ResourceType resourceType, final OperationType operationType, final Supplier<DocumentClientRetryPolicy> retryPolicyFactory, final RxDocumentServiceRequest req, final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation ) { checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null."); checkNotNull(req, "Argument 'req' must not be null."); assert(resourceType == ResourceType.Document); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = this.getEffectiveEndToEndOperationLatencyPolicyConfig( req.requestContext.getEndToEndOperationLatencyPolicyConfig(), resourceType, operationType); List<String> initialExcludedRegions = req.requestContext.getExcludeRegions(); List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, 
operationType, false, initialExcludedRegions ); if (orderedApplicableRegionsForSpeculation.size() < 2) { return feedOperation.apply(retryPolicyFactory, req); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>(); orderedApplicableRegionsForSpeculation .forEach(region -> { RxDocumentServiceRequest clonedRequest = req.clone(); if (monoList.isEmpty()) { Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedRequest.requestContext.setExcludeRegions( getEffectiveExcludedRegionsForHedging( initialExcludedRegions, orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) 
.delaySubscription(delayForCrossRegionalRetry)); } else { monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { return Utils.as(innerException, CosmosException.class); } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( message, innerException ); } index++; } } return exception; }); } @FunctionalInterface private interface DocumentPointOperation { Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride); } private static class NonTransientPointOperationResult { private final ResourceResponse<Document> response; private final CosmosException exception; public NonTransientPointOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientPointOperationResult(ResourceResponse<Document> response) { checkNotNull(response, "Argument 'response' 
must not be null."); this.exception = null; this.response = response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public ResourceResponse<Document> getResponse() { return this.response; } }

// Result wrapper for feed operations that either completed with a response or failed with a
// non-transient CosmosException. Exactly one of {response, exception} is non-null.
private static class NonTransientFeedOperationResult<T> {
    private final T response;
    private final CosmosException exception;

    // Failure case: wraps the terminal (non-retriable) exception.
    public NonTransientFeedOperationResult(CosmosException exception) {
        checkNotNull(exception, "Argument 'exception' must not be null.");
        this.exception = exception;
        this.response = null;
    }

    // Success case: wraps the feed response payload.
    public NonTransientFeedOperationResult(T response) {
        checkNotNull(response, "Argument 'response' must not be null.");
        this.exception = null;
        this.response = response;
    }

    // True when this result represents a failure (exception branch was taken).
    public boolean isError() { return this.exception != null; }

    public CosmosException getException() { return this.exception; }

    public T getResponse() { return this.response; }
}

// DiagnosticsClientContext decorator that records every CosmosDiagnostics instance it creates
// so they can later be merged into a single CosmosDiagnosticsContext (e.g. for an operation
// that spans multiple service requests). merge(...) is one-shot, enforced via CAS on isMerged.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {
    // Ensures merge(...) runs at most once per scope (compareAndSet gate below).
    private final AtomicBoolean isMerged = new AtomicBoolean(false);
    // The real client context all calls are delegated to.
    private final DiagnosticsClientContext inner;
    // Every diagnostics instance created through this factory, in creation order.
    private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
    // When true, feed diagnostics are flagged as captured-in-paged-flux during merge.
    private final boolean shouldCaptureAllFeedDiagnostics;
    private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);

    public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
        checkNotNull(inner, "Argument 'inner' must not be null.");
        this.inner = inner;
        this.createdDiagnostics = new ConcurrentLinkedQueue<>();
        this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
    }

    @Override
    public DiagnosticsClientConfig getConfig() { return inner.getConfig(); }

    // Delegates creation to the inner context, then tracks the instance for later merging.
    @Override
    public CosmosDiagnostics createDiagnostics() {
        CosmosDiagnostics diagnostics = inner.createDiagnostics();
        createdDiagnostics.add(diagnostics);
        mostRecentlyCreatedDiagnostics.set(diagnostics);
        return diagnostics;
    }

    @Override
    public String getUserAgent() { return inner.getUserAgent(); }

    @Override
    public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return this.mostRecentlyCreatedDiagnostics.get(); }

    // Convenience overload: pulls the known diagnostics-context snapshot (if any) out of the
    // request options before merging.
    public void merge(RequestOptions requestOptions) {
        CosmosDiagnosticsContext knownCtx = null;
        if (requestOptions != null) {
            CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
            if (ctxSnapshot != null) {
                // NOTE(review): second getter call here returns the same snapshot just
                // null-checked above; assigning ctxSnapshot directly would be equivalent.
                knownCtx = requestOptions.getDiagnosticsContextSnapshot();
            }
        }
        merge(knownCtx);
    }

    // Attaches all tracked, not-yet-contextualized diagnostics to one diagnostics context.
    // Target context resolution: the caller-provided one when non-null, otherwise the first
    // tracked diagnostics instance that already carries a context. No-op if none is found or
    // if a merge already happened for this scope.
    public void merge(CosmosDiagnosticsContext knownCtx) {
        // One-shot gate: only the first caller performs the merge.
        if (!isMerged.compareAndSet(false, true)) {
            return;
        }
        CosmosDiagnosticsContext ctx = null;
        if (knownCtx != null) {
            ctx = knownCtx;
        } else {
            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                if (diagnostics.getDiagnosticsContext() != null) {
                    ctx = diagnostics.getDiagnosticsContext();
                    break;
                }
            }
        }
        if (ctx == null) {
            return;
        }
        for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
            // Only attach diagnostics that have no context yet and actually carry data.
            if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                if (this.shouldCaptureAllFeedDiagnostics && diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {
                    // Mark feed diagnostics as captured so downstream paged-flux plumbing
                    // does not drop them.
                    AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                    if (isCaptured != null) {
                        isCaptured.set(true);
                    }
                }
                ctxAccessor.addDiagnostics(ctx, diagnostics);
            }
        }
    }

    // Re-arms the factory for reuse: drops tracked diagnostics and clears the merge gate.
    public void reset() {
        this.createdDiagnostics.clear();
        this.isMerged.set(false);
    }
}
}
// Fixed in next iteration (see the guarded variant of resetSessionContainerIfNeeded below).
private void resetSessionContainerIfNeeded(DatabaseAccount databaseAccount) { boolean isRegionScopingOfSessionTokensPossible = this.isRegionScopingOfSessionTokensPossible(databaseAccount, this.useMultipleWriteLocations, this.isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig); this.sessionContainer = new RegionScopedSessionContainer(this.serviceEndpoint.getHost(), this.sessionCapturingDisabled, this.globalEndpointManager); this.diagnosticsClientConfig.withRegionScopedSessionContainerOptions((RegionScopedSessionContainer) this.sessionContainer); }
private void resetSessionContainerIfNeeded(DatabaseAccount databaseAccount) { boolean isRegionScopingOfSessionTokensPossible = this.isRegionScopingOfSessionTokensPossible(databaseAccount, this.useMultipleWriteLocations, this.isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig); if (isRegionScopingOfSessionTokensPossible) { this.sessionContainer = new RegionScopedSessionContainer(this.serviceEndpoint.getHost(), this.sessionCapturingDisabled, this.globalEndpointManager); this.diagnosticsClientConfig.withRegionScopedSessionContainerOptions((RegionScopedSessionContainer) this.sessionContainer); } }
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private final static List<String> EMPTY_REGION_LIST = Collections.emptyList(); private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor(); private final static ImplementationBridgeHelpers.FeedResponseHelper.FeedResponseAccessor feedResponseAccessor = ImplementationBridgeHelpers.FeedResponseHelper.getFeedResponseAccessor(); private final static ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final static ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor = ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor(); private final static ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor itemResponseAccessor = ImplementationBridgeHelpers.CosmosItemResponseHelper.getCosmosItemResponseBuilderAccessor(); private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>(); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new 
Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final CosmosItemSerializer defaultCustomSerializer; private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private ISessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxGatewayStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private final ApiType apiType; private final 
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final String clientCorrelationId; private final SessionRetryOptions sessionRetryOptions; private final boolean sessionCapturingOverrideEnabled; private final boolean sessionCapturingDisabled; private final boolean isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, 
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, 
clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length == 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs 
= null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig 
containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { assert(clientTelemetryConfig != null); Boolean clientTelemetryEnabled = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor() .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); assert(clientTelemetryEnabled != null); activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.incrementAndGet(); this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ? String.format("%05d",this.clientId): clientCorrelationId; clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withClientMap(clientMap); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig; this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig); this.sessionRetryOptions = sessionRetryOptions; this.defaultCustomSerializer = defaultCustomSerializer; logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential 
= tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); 
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); this.diagnosticsClientConfig.withMachineId(tempMachineId); this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig); this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions); this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionCapturingDisabled = disableSessionCapturing; this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig = isRegionScopedSessionCapturingEnabled; this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = new ConcurrentHashMap<>(); this.apiType = apiType; this.clientTelemetryConfig = clientTelemetryConfig; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = diagnosticsAccessor.create(this, 
telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig)); this.mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } private DatabaseAccount initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError(); if (databaseRefreshErrorSnapshot != null) { logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot ); throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot); } else { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. 
More info: https: } } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); return databaseAccount; } private boolean isRegionScopingOfSessionTokensPossible(DatabaseAccount databaseAccount, boolean useMultipleWriteLocations, boolean isRegionScopedSessionCapturingEnabled) { if (!isRegionScopedSessionCapturingEnabled) { return false; } if (!useMultipleWriteLocations) { return false; } Iterable<DatabaseAccountLocation> readableLocationsIterable = databaseAccount.getReadableLocations(); Iterator<DatabaseAccountLocation> readableLocationsIterator = readableLocationsIterable.iterator(); while (readableLocationsIterator.hasNext()) { DatabaseAccountLocation readableLocation = readableLocationsIterator.next(); String normalizedReadableRegion = readableLocation.getName().toLowerCase(Locale.ROOT).trim().replace(" ", ""); if (RegionNameToRegionIdMap.getRegionId(normalizedReadableRegion) == -1) { return false; } } return true; } private void updateGatewayProxy() { (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); (this.gatewayProxy).setCollectionCache(this.collectionCache); (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); (this.gatewayProxy).setSessionContainer(this.sessionContainer); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); 
DatabaseAccount databaseAccountSnapshot = this.initializeGatewayConfigurationReader(); this.resetSessionContainerIfNeeded(databaseAccountSnapshot); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); clientTelemetry = new ClientTelemetry( this, null, randomUuid().toString(), ManagementFactory.getRuntimeMXBean().getName(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.configs, this.clientTelemetryConfig, this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init().thenEmpty((publisher) -> { logger.warn( "Initialized DocumentClient [{}] with machineId[{}]" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]", clientId, ClientTelemetry.getMachineId(diagnosticsClientConfig), serviceEndpoint, connectionPolicy, consistencyLevel); }).subscribe(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } this.retryPolicy.setRxCollectionCache(this.collectionCache); ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null ? 
consistencyLevel : this.getDefaultConsistencyLevelOfAccount(); boolean updatedDisableSessionCapturing = (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry, this.globalEndpointManager); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, 
sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString()); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations, this.sessionRetryOptions); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public String getClientCorrelationId() { return this.clientCorrelationId; } @Override public String getMachineId() { if (this.diagnosticsClientConfig == null) { return null; } return ClientTelemetry.getMachineId(diagnosticsClientConfig); } @Override public String getUserAgent() { return this.userAgentContainer.getUserAgent(); } @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return 
mostRecentlyCreatedDiagnostics.get(); } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = database.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> 
toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return qryOptAccessor.getImpl(options).getOperationContextAndListenerTuple(); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T> Flux<FeedResponse<T>> createQuery( String 
parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum) { return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this); } private <T> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum, DiagnosticsClientContext innerDiagnosticsFactory) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions(); UUID correlationActivityIdOfRequestOptions = qryOptAccessor .getImpl(nonNullQueryOptions) .getCorrelationActivityId(); UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ? correlationActivityIdOfRequestOptions : randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions)); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); state.registerDiagnosticsFactory( diagnosticsFactory::reset, diagnosticsFactory::merge); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal( diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout), invalidPartitionExceptionRetryPolicy ).flatMap(result -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); return Mono.just(result); }) .onErrorMap(throwable -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); 
return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot())); } private <T> Flux<FeedResponse<T>> createQueryInternal( DiagnosticsClientContext diagnosticsClientContext, String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId, final AtomicBoolean isQueryCancelledOnTimeout) { Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) { queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); RequestOptions requestOptions = options == null? 
null : ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .toRequestOptions(options); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(requestOptions, resourceTypeEnum, OperationType.Query); if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) { return getFeedResponseFluxWithTimeout( feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout, diagnosticsClientContext); } return feedResponseFlux; }, Queues.SMALL_BUFFER_SIZE, 1); } private static void applyExceptionToMergedDiagnosticsForQuery( CosmosQueryRequestOptions requestOptions, CosmosException exception, DiagnosticsClientContext diagnosticsClientContext) { CosmosDiagnostics mostRecentlyCreatedDiagnostics = diagnosticsClientContext.getMostRecentlyCreatedDiagnostics(); if (mostRecentlyCreatedDiagnostics != null) { BridgeInternal.setCosmosDiagnostics( exception, mostRecentlyCreatedDiagnostics); } else { List<CosmosDiagnostics> cancelledRequestDiagnostics = qryOptAccessor .getCancelledRequestDiagnosticsTracker(requestOptions); if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) { CosmosDiagnostics aggregratedCosmosDiagnostics = cancelledRequestDiagnostics .stream() .reduce((first, toBeMerged) -> { ClientSideRequestStatistics clientSideRequestStatistics = ImplementationBridgeHelpers .CosmosDiagnosticsHelper .getCosmosDiagnosticsAccessor() .getClientSideRequestStatisticsRaw(first); ClientSideRequestStatistics toBeMergedClientSideRequestStatistics = ImplementationBridgeHelpers .CosmosDiagnosticsHelper .getCosmosDiagnosticsAccessor() .getClientSideRequestStatisticsRaw(first); if (clientSideRequestStatistics == null) { return toBeMerged; } else { clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics); return first; } }) .get(); BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics); } 
} } private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout( Flux<FeedResponse<T>> feedResponseFlux, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, CosmosQueryRequestOptions requestOptions, final AtomicBoolean isQueryCancelledOnTimeout, DiagnosticsClientContext diagnosticsClientContext) { Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout(); if (endToEndTimeout.isNegative()) { return feedResponseFlux .timeout(endToEndTimeout) .onErrorMap(throwable -> { if (throwable instanceof TimeoutException) { CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout); cancellationException.setStackTrace(throwable.getStackTrace()); isQueryCancelledOnTimeout.set(true); applyExceptionToMergedDiagnosticsForQuery( requestOptions, cancellationException, diagnosticsClientContext); return cancellationException; } return throwable; }); } return feedResponseFlux .timeout(endToEndTimeout) .onErrorMap(throwable -> { if (throwable instanceof TimeoutException) { CosmosException exception = new OperationCancelledException(); exception.setStackTrace(throwable.getStackTrace()); isQueryCancelledOnTimeout.set(true); applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext); return exception; } return throwable; }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) { return queryDatabases(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return 
ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = collection.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, 
getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken( request, resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = collection.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken( request, resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } 
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, QueryFeedOperationState state) { return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, 
QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ((JsonSerializable) object).toJson(); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = 
this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if (options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { 
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, offer.getOfferAutoScaleSettings().toJson()); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null) { if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) { 
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE, String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed())); } } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || 
    partitionKeyDefinition.getPaths().size() == 0) {
            // Collection has no partition key paths -> canonical "empty" partition key value.
            partitionKeyInternal = PartitionKeyInternal.getEmpty();
        } else if (contentAsByteBuffer != null || objectDoc != null) {
            // Extract the partition key value from the document payload itself.
            InternalObjectNode internalObjectNode;
            if (objectDoc instanceof InternalObjectNode) {
                internalObjectNode = (InternalObjectNode) objectDoc;
            } else if (objectDoc instanceof ObjectNode) {
                internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc);
            } else if (contentAsByteBuffer != null) {
                // Rewind so the serialized content can be re-read from the beginning.
                contentAsByteBuffer.rewind();
                internalObjectNode = new InternalObjectNode(contentAsByteBuffer);
            } else {
                throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null");
            }

            // Time the partition-key extraction and record it in the request's serialization diagnostics.
            Instant serializationStartTime = Instant.now();
            partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition);
            Instant serializationEndTime = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
                new SerializationDiagnosticsContext.SerializationDiagnostics(
                    serializationStartTime,
                    serializationEndTime,
                    SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION
                );
            SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
            if (serializationDiagnosticsContext != null) {
                serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
            }
        } else {
            throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation.");
        }

        // Stamp the resolved partition key on the request and its wire header (escaped for non-ASCII).
        request.setPartitionKeyInternal(partitionKeyInternal);
        request.setPartitionKeyDefinition(partitionKeyDefinition);
        request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
    }

    // Builds the RxDocumentServiceRequest for a document Create/Upsert: serializes the payload,
    // records serialization diagnostics, wires the retry policy and E2E-timeout cancellation hook,
    // and resolves the target collection so partition key information can be attached.
    private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                    String documentCollectionLink,
                                                                    Object document,
                                                                    RequestOptions options,
                                                                    boolean disableAutomaticIdGeneration,
                                                                    OperationType operationType,
                                                                    DiagnosticsClientContext clientContextOverride) {

        if (StringUtils.isEmpty(documentCollectionLink)) {
            throw new IllegalArgumentException("documentCollectionLink");
        }

        if (document == null) {
            throw new IllegalArgumentException("document");
        }

        Instant serializationStartTimeUTC = Instant.now();
        String trackingId = null;
        if (options != null) {
            trackingId = options.getTrackingId();
        }
        // NOTE(review): options is null-checked just above but dereferenced unconditionally here;
        // visible callers always pass non-null options — confirm before relying on a null path.
        ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, options.getEffectiveItemSerializer(), trackingId);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            operationType, ResourceType.Document, path, requestHeaders, options, content);

        if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if( options != null) {
            // Hook lets the E2E-timeout wrapper mark this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return addPartitionKeyInformation(request, content, document, options, collectionObs);
    }

    // Builds the RxDocumentServiceRequest for a transactional batch: wraps the pre-serialized
    // batch body, records serialization diagnostics, and resolves the collection so that
    // batch-specific headers can be attached.
    private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                   String documentCollectionLink,
                                                                   ServerBatchRequest serverBatchRequest,
                                                                   RequestOptions options,
                                                                   boolean disableAutomaticIdGeneration) {

        checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
        checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");

        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
        Instant serializationEndTimeUTC = Instant.now();

        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTimeUTC,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this,
            OperationType.Batch,
            ResourceType.Document,
            path,
            requestHeaders,
            options,
            content);

        if (options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext
serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } if (options != null) { request.requestContext.setExcludeRegions(options.getExcludeRegions()); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> { addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v); return request; }); } private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request, ServerBatchRequest serverBatchRequest, DocumentCollection collection) { if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) { PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue(); PartitionKeyInternal partitionKeyInternal; if (partitionKey.equals(PartitionKey.NONE)) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey); } request.setPartitionKeyInternal(partitionKeyInternal); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) { request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId())); } else { throw new UnsupportedOperationException("Unknown Server request."); } request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, 
        Boolean.TRUE.toString());
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
        request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));

        request.setPartitionKeyDefinition(collection.getPartitionKey());
        request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());

        return request;
    }

    /**
     * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
     * @param request request to populate headers to
     * @param httpMethod http method
     * @return Mono, which on subscription will populate the headers in the request passed in the argument.
     */
    private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
        // Every request carries the RFC1123 date header used when signing the authorization token.
        request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
        if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
            || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
            String resourceName = request.getResourceAddress();

            String authorization = this.getUserAuthorizationToken(
                resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
                AuthorizationTokenType.PrimaryMasterKey, request.properties);
            try {
                // Token is URL-encoded before being placed on the wire header.
                authorization = URLEncoder.encode(authorization, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new IllegalStateException("Failed to encode authtoken.", e);
            }
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
        }

        if (this.apiType != null) {
            request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
        }

        this.populateCapabilitiesHeader(request);

        // Default content-type headers per verb, only when the caller has not already set one.
        if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
        }

        if (RequestVerb.PATCH.equals(httpMethod)
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
        }

        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        }

        MetadataDiagnosticsContext metadataDiagnosticsCtx =
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);

        if (this.requiresFeedRangeFiltering(request)) {
            // Feed-range reads need EPK filtering headers resolved from the collection's PK definition
            // before the authorization header can be computed.
            return request.getFeedRange()
                          .populateFeedRangeFilteringHeaders(
                              this.getPartitionKeyRangeCache(),
                              request,
                              this.collectionCache
                                  .resolveCollectionAsync(metadataDiagnosticsCtx, request)
                                  .flatMap(documentCollectionValueHolder -> {
                                      if (documentCollectionValueHolder.v != null) {
                                          request.setPartitionKeyDefinition(documentCollectionValueHolder.v.getPartitionKey());
                                      }
                                      return Mono.just(documentCollectionValueHolder);
                                  })
                          )
                          .flatMap(this::populateAuthorizationHeader);
        }

        return this.populateAuthorizationHeader(request);
    }

    // Advertises the SDK's supported capabilities unless the caller already set the header.
    private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
            request
                .getHeaders()
                .put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
        }
    }

    // Only document/conflict feed reads and queries that target a feed range need EPK filtering.
    private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
        if (request.getResourceType() != ResourceType.Document &&
            request.getResourceType() != ResourceType.Conflict) {
            return false;
        }

        switch (request.getOperationType()) {
            case ReadFeed:
            case Query:
            case SqlQuery:
                return request.getFeedRange() != null;
            default:
                return false;
        }
    }

    // Adds the AAD bearer token to the request when AAD auth is configured; no-op otherwise.
    @Override
    public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
        if (request == null) {
            throw new IllegalArgumentException("request");
        }

        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return request;
                });
        } else {
            return Mono.just(request);
        }
    }

    // Same as above, but operates on raw HttpHeaders.
    @Override
    public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
        if (httpHeaders == null) {
            throw new IllegalArgumentException("httpHeaders");
        }

        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return httpHeaders;
                });
        }

        return Mono.just(httpHeaders);
    }

    @Override
    public AuthorizationTokenType getAuthorizationTokenType() {
        return this.authorizationTokenType;
    }

    // Resolves the auth token with precedence: custom token resolver > key credential >
    // master key / single resource token > per-resource token map.
    @Override
    public String getUserAuthorizationToken(String resourceName,
                                            ResourceType resourceType,
                                            RequestVerb requestVerb,
                                            Map<String, String> headers,
                                            AuthorizationTokenType tokenType,
                                            Map<String, Object> properties) {

        if (this.cosmosAuthorizationTokenResolver != null) {
            return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(
                requestVerb.toUpperCase(),
                resourceName,
                this.resolveCosmosResourceType(resourceType).toString(),
                properties != null ?
        Collections.unmodifiableMap(properties) : null);
        } else if (credential != null) {
            return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers);
        } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) {
            // A single resource token was supplied in place of the master key -> use it verbatim.
            return masterKeyOrResourceToken;
        } else {
            assert resourceTokensMap != null;
            if(resourceType.equals(ResourceType.DatabaseAccount)) {
                return this.firstResourceTokenFromPermissionFeed;
            }

            return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers);
        }
    }

    // Maps the service-side ResourceType to the public CosmosResourceType; unknown values map to SYSTEM.
    private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) {
        CosmosResourceType cosmosResourceType =
            ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString());
        if (cosmosResourceType == null) {
            return CosmosResourceType.SYSTEM;
        }

        return cosmosResourceType;
    }

    // Records the response's session token so session consistency can be honored on later requests.
    void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) {
        this.sessionContainer.setSessionToken(request, response.getResponseHeaders());
    }

    // POSTs a create request through the store proxy, updating retry timing diagnostics when
    // this attempt is a retry.
    private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request,
                                                   DocumentClientRetryPolicy documentClientRetryPolicy,
                                                   OperationContextAndListenerTuple operationContextAndListenerTuple) {
        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                RxStoreModel storeProxy = this.getStoreProxy(requestPopulated);

                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple);
            });
    }

    // Like create(), but flags the request as an upsert and captures the session token from the response.
    private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request,
                                                   DocumentClientRetryPolicy documentClientRetryPolicy,
                                                   OperationContextAndListenerTuple operationContextAndListenerTuple) {

        return populateHeadersAsync(request, RequestVerb.POST)
            .flatMap(requestPopulated -> {
                Map<String, String> headers = requestPopulated.getHeaders();
                // Headers are expected to be initialized by populateHeadersAsync.
                assert (headers != null);
                headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true");
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple)
                    .map(response -> {
                            this.captureSessionToken(requestPopulated, response);
                            return response;
                        }
                    );
            });
    }

    // PUTs a replace request through the store proxy.
    private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeadersAsync(request, RequestVerb.PUT)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    // PATCHes a partial-update request through the store proxy.
    private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) {
        return populateHeadersAsync(request, RequestVerb.PATCH)
            .flatMap(requestPopulated -> {
                if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) {
                    documentClientRetryPolicy.getRetryContext().updateEndTime();
                }

                return getStoreProxy(requestPopulated).processMessage(requestPopulated);
            });
    }

    @Override
    public Mono<ResourceResponse<Document>> createDocument(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration) {

        // Point operation wrapped with the availability strategy; the last flag marks whether the
        // (write) operation may be speculated, which requires non-idempotent write retries enabled.
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Create,
            (opt, e2ecfg, clientCtxOverride) -> createDocumentCore(
                collectionLink,
                document,
                opt,
                disableAutomaticIdGeneration,
                e2ecfg,
                clientCtxOverride),
            options,
            options != null &&
            options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    // Sets up the scoped diagnostics, retry policy (incl. PK-mismatch retry when no partition key
    // was supplied) and the end-to-end timeout wrapper for a document create.
    private Mono<ResourceResponse<Document>> createDocumentCore(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
        if (nonNullRequestOptions.getPartitionKey() == null) {
            // No PK supplied -> retry with a refreshed collection cache on PK-definition mismatch.
            requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
        }

        DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;

        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> createDocumentInternal(
                    collectionLink,
                    document,
                    nonNullRequestOptions,
                    disableAutomaticIdGeneration,
                    finalRetryPolicyInstance,
                    scopedDiagnosticsFactory),
                requestRetryPolicy),
            scopedDiagnosticsFactory
        );
    }

    private Mono<ResourceResponse<Document>> createDocumentInternal(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration,
        DocumentClientRetryPolicy requestRetryPolicy,
        DiagnosticsClientContext clientContextOverride) {
        try {
            logger.debug("Creating a Document. collectionLink: [{}]", collectionLink);

            Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride);

            return requestObs
                .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

        } catch (Exception e) {
            // Synchronous failures during request construction surface as an error Mono.
            logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    // Applies the end-to-end operation timeout (when enabled) to a point-operation Mono and maps
    // reactor timeouts into OperationCancelledException carrying the latest diagnostics snapshot.
    private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout(
        RequestOptions requestOptions,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        Mono<T> rxDocumentServiceResponseMono,
        ScopedDiagnosticsFactory scopedDiagnosticsFactory) {

        requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig);

        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout();
            if (endToEndTimeout.isNegative()) {
                // A negative timeout fails fast without even issuing the request.
                CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
                if (latestCosmosDiagnosticsSnapshot == null) {
                    scopedDiagnosticsFactory.createDiagnostics();
                }
                return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout));
            }

            return rxDocumentServiceResponseMono
                .timeout(endToEndTimeout)
                .onErrorMap(throwable -> getCancellationExceptionForPointOperations(
                    scopedDiagnosticsFactory,
                    throwable,
                    requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook()));
        }

        return rxDocumentServiceResponseMono;
    }

    // Converts a reactor TimeoutException into OperationCancelledException, marking the in-flight
    // request context as cancelled-on-timeout (via the hook) and attaching the latest diagnostics.
    private static Throwable getCancellationExceptionForPointOperations(
        ScopedDiagnosticsFactory scopedDiagnosticsFactory,
        Throwable throwable,
        AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) {

        Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable);
        if (unwrappedException instanceof TimeoutException) {
            CosmosException exception = new OperationCancelledException();
            exception.setStackTrace(throwable.getStackTrace());

            Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get();
            if (actualCallback != null) {
                logger.trace("Calling actual Mark E2E timeout callback");
                actualCallback.run();
            }

            CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics();
            if (lastDiagnosticsSnapshot == null) {
                scopedDiagnosticsFactory.createDiagnostics();
            }
            BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics());
            return exception;
        }
        return throwable;
    }

    // Builds the fail-fast exception used when a caller configures a negative E2E timeout.
    private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) {
        checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null");
        checkArgument(
            negativeTimeout.isNegative(),
            "This exception should only be used for negative timeouts");
        String message = String.format("Negative timeout '%s' provided.", negativeTimeout);
        CosmosException exception = new OperationCancelledException(message, null);
        BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED);
        if (cosmosDiagnostics != null) {
            BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics);
        }

        return exception;
    }

    @Override
    public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document,
                                                           RequestOptions options, boolean disableAutomaticIdGeneration) {
        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Upsert,
            (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore(
                collectionLink,
                document,
                opt,
                disableAutomaticIdGeneration,
                e2ecfg,
                clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    private Mono<ResourceResponse<Document>> upsertDocumentCore(
        String collectionLink,
        Object document,
        RequestOptions
        options,
        boolean disableAutomaticIdGeneration,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        if (nonNullRequestOptions.getPartitionKey() == null) {
            // No PK supplied -> retry with a refreshed collection cache on PK-definition mismatch.
            requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
        }
        DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy;
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> upsertDocumentInternal(
                    collectionLink,
                    document,
                    nonNullRequestOptions,
                    disableAutomaticIdGeneration,
                    finalRetryPolicyInstance,
                    scopedDiagnosticsFactory),
                finalRetryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }

    private Mono<ResourceResponse<Document>> upsertDocumentInternal(
        String collectionLink,
        Object document,
        RequestOptions options,
        boolean disableAutomaticIdGeneration,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            logger.debug("Upserting a Document. collectionLink: [{}]", collectionLink);

            Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest(
                retryPolicyInstance,
                collectionLink,
                document,
                options,
                disableAutomaticIdGeneration,
                OperationType.Upsert,
                clientContextOverride);

            return reqObs
                .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));

        } catch (Exception e) {
            logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document,
                                                            RequestOptions options) {

        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Replace,
            (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore(
                documentLink,
                document,
                opt,
                e2ecfg,
                clientCtxOverride),
            options,
            options != null && options.getNonIdempotentWriteRetriesEnabled()
        );
    }

    private Mono<ResourceResponse<Document>> replaceDocumentCore(
        String documentLink,
        Object document,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        if (nonNullRequestOptions.getPartitionKey() == null) {
            // Derive the collection link from the document link for the PK-mismatch retry policy.
            String collectionLink = Utils.getCollectionName(documentLink);
            requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(
                collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions);
        }
        DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy;

        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> replaceDocumentInternal(
                    documentLink,
                    document,
                    nonNullRequestOptions,
                    finalRequestRetryPolicy,
                    endToEndPolicyConfig,
                    scopedDiagnosticsFactory),
                requestRetryPolicy),
            scopedDiagnosticsFactory
        );
    }

    private Mono<ResourceResponse<Document>> replaceDocumentInternal(
        String documentLink,
        Object document,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }

            if (document == null) {
                throw new IllegalArgumentException("document");
            }

            // Convert the arbitrary POJO/node into the internal Document representation.
            Document typedDocument = Document.fromObject(document, options.getEffectiveItemSerializer());

            return this.replaceDocumentInternal(
                documentLink,
                typedDocument,
                options,
                retryPolicyInstance,
                clientContextOverride);

        } catch (Exception e) {
            logger.debug("Failure in replacing a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) {

        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Replace,
            (opt, e2ecfg, clientCtxOverride) ->
replaceDocumentCore( document, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> replaceDocumentCore( Document document, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( document, options, finalRequestRetryPolicy, endToEndPolicyConfig, clientContextOverride), requestRetryPolicy); } private Mono<ResourceResponse<Document>> replaceDocumentInternal( Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal( document.getSelfLink(), document, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) { logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } } private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); Consumer<Map<String, Object>> onAfterSerialization = null; if (options != null) { String trackingId = options.getTrackingId(); if (trackingId != null && !trackingId.isEmpty()) { onAfterSerialization = (node) -> node.put(Constants.Properties.TRACKING_ID, trackingId); } } ByteBuffer content = document.serializeJsonToByteBuffer(options.getEffectiveItemSerializer(), onAfterSerialization); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs .flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); } private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig( RequestOptions options, ResourceType resourceType, OperationType operationType) { return this.getEffectiveEndToEndOperationLatencyPolicyConfig( options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null, resourceType, operationType); } private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig( CosmosEndToEndOperationLatencyPolicyConfig policyConfig, ResourceType resourceType, OperationType operationType) { if (policyConfig != null) { return policyConfig; } if (resourceType != ResourceType.Document) { return null; } if (!operationType.isPointOperation() && Configs.isDefaultE2ETimeoutDisabledForNonPointOperations()) { return null; } return this.cosmosEndToEndOperationLatencyPolicyConfig; } @Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Patch, (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore( documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> patchDocumentCore( String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = 
            options != null ? options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy documentClientRetryPolicy =
            this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> patchDocumentInternal(
                    documentLink,
                    cosmosPatchOperations,
                    nonNullRequestOptions,
                    documentClientRetryPolicy,
                    scopedDiagnosticsFactory),
                documentClientRetryPolicy),
            scopedDiagnosticsFactory
        );
    }

    // Builds and issues the PATCH request for a set of partial-update operations.
    private Mono<ResourceResponse<Document>> patchDocumentInternal(
        String documentLink,
        CosmosPatchOperations cosmosPatchOperations,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {

        checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink");
        checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations");

        logger.debug("Running patch operations on Document. documentLink: [{}]", documentLink);

        final String path = Utils.joinPath(documentLink, null);

        final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch);
        Instant serializationStartTimeUTC = Instant.now();

        // The payload is the serialized patch-operation list, not a full document body.
        ByteBuffer content = ByteBuffer.wrap(
            PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options));

        Instant serializationEndTime = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics(
            serializationStartTimeUTC,
            serializationEndTime,
            SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);

        final RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            clientContextOverride,
            OperationType.Patch,
            ResourceType.Document,
            path,
            requestHeaders,
            options,
            content);

        if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if (options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }

        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }

        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }

        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(
            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

        // No document body is passed: the partition key must come from request options/headers.
        Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
            request, null, null, options, collectionObs);

        return requestObs
            .flatMap(req -> patch(request,
retryPolicyInstance)) .map(resp -> toResourceResponse(resp, Document.class)); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Delete, (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore( documentLink, null, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } @Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Delete, (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore( documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> deleteDocumentCore( String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
            // (continuation of deleteDocumentCore)
            options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> deleteDocumentInternal(
                    documentLink,
                    internalObjectNode,
                    nonNullRequestOptions,
                    requestRetryPolicy,
                    scopedDiagnosticsFactory),
                requestRetryPolicy),
            scopedDiagnosticsFactory
        );
    }

    /**
     * Builds and issues the DELETE request for a single document; the optional
     * {@code internalObjectNode} supplies the partition key when available.
     */
    private Mono<ResourceResponse<Document>> deleteDocumentInternal(
        String documentLink,
        InternalObjectNode internalObjectNode,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }

            logger.debug("Deleting a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Delete,
                ResourceType.Document,
                path,
                requestHeaders,
                options);
            if (options != null && options.getNonIdempotentWriteRetriesEnabled()) {
                request.setNonIdempotentWriteRetriesEnabled(true);
            }
            if (options != null) {
                // Allow the e2e timeout policy to mark cancellation on this request.
                options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                    () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
                collectionCache.resolveCollectionAsync(
                    BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                    request);

            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(
                request, null, internalObjectNode, options, collectionObs);

            return requestObs
                .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class));
        } catch (Exception e) {
            // Synchronous failures are surfaced as an error Mono rather than thrown.
            logger.debug("Failure in deleting a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    /** Deletes all items sharing a logical partition key (server-side batch delete). */
    @Override
    public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // NOTE(review): the PartitionKey parameter of the public overload is not passed in
    // here; presumably it is carried via 'options' / addPartitionKeyInformation — confirm.
    private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options,
                                                                                     DocumentClientRetryPolicy retryPolicyInstance) {
        try {
            if (StringUtils.isEmpty(collectionLink)) {
                throw new IllegalArgumentException("collectionLink");
            }

            logger.debug("Deleting all items by Partition Key. collectionLink: [{}]", collectionLink);
            String path = Utils.joinPath(collectionLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
                collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

            return requestObs.flatMap(req -> this
                .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in deleting documents due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    /** Reads a single document using this client as the diagnostics factory. */
    @Override
    public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) {
        return readDocument(documentLink, options, this);
    }

    /** Read overload that lets callers (e.g. readMany point reads) supply their own diagnostics factory. */
    private Mono<ResourceResponse<Document>> readDocument(
        String documentLink,
        RequestOptions options,
        DiagnosticsClientContext innerDiagnosticsFactory) {

        return wrapPointOperationWithAvailabilityStrategy(
            ResourceType.Document,
            OperationType.Read,
            (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride),
            options,
            false,
            innerDiagnosticsFactory
        );
    }

    /** Shared read path: applies e2e timeout policy around readDocumentInternal. */
    private Mono<ResourceResponse<Document>> readDocumentCore(
        String documentLink,
        RequestOptions options,
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        DiagnosticsClientContext clientContextOverride) {

        RequestOptions nonNullRequestOptions = options != null ?
            // (continuation of readDocumentCore)
            options : new RequestOptions();
        ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false);
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory);
        return getPointOperationResponseMonoWithE2ETimeout(
            nonNullRequestOptions,
            endToEndPolicyConfig,
            ObservableHelper.inlineIfPossibleAsObs(
                () -> readDocumentInternal(
                    documentLink,
                    nonNullRequestOptions,
                    retryPolicyInstance,
                    scopedDiagnosticsFactory),
                retryPolicyInstance),
            scopedDiagnosticsFactory
        );
    }

    /**
     * Builds and issues the point-read request for a single document.
     * Note: unlike delete/patch, 'options' is dereferenced without a null check —
     * readDocumentCore always passes a non-null RequestOptions.
     */
    private Mono<ResourceResponse<Document>> readDocumentInternal(
        String documentLink,
        RequestOptions options,
        DocumentClientRetryPolicy retryPolicyInstance,
        DiagnosticsClientContext clientContextOverride) {
        try {
            if (StringUtils.isEmpty(documentLink)) {
                throw new IllegalArgumentException("documentLink");
            }

            logger.debug("Reading a Document. documentLink: [{}]", documentLink);
            String path = Utils.joinPath(documentLink, null);
            Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
                getEffectiveClientContext(clientContextOverride),
                OperationType.Read, ResourceType.Document, path, requestHeaders, options);

            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
                this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);

            Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs);

            // NOTE(review): captures outer 'request' rather than 'req' (same pattern as patch) — confirm intentional.
            return requestObs.flatMap(req -> this.read(request, retryPolicyInstance)
                .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)));
        } catch (Exception e) {
            logger.debug("Failure in reading a document due to [{}]", e.getMessage());
            return Mono.error(e);
        }
    }

    /** Reads all documents of a collection by issuing a SELECT * query. */
    @Override
    public <T> Flux<FeedResponse<T>> readDocuments(
        String collectionLink, QueryFeedOperationState state, Class<T> classOfT) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT);
    }

    /**
     * readMany: fetches a batch of items identified by (id, partition key) pairs.
     * Items are bucketed by partition-key range; single-item buckets become point
     * reads, multi-item buckets become per-range queries; results are merged into
     * one aggregated FeedResponse with combined diagnostics and request charge.
     */
    @Override
    public <T> Mono<FeedResponse<T>> readMany(
        List<CosmosItemIdentity> itemIdentityList,
        String collectionLink,
        QueryFeedOperationState state,
        Class<T> klass) {

        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true);
        state.registerDiagnosticsFactory(
            () -> {},
            (ctx) -> diagnosticsFactory.merge(ctx)
        );

        String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory,
            OperationType.Query,
            ResourceType.Document,
            collectionLink, null
        );

        // Resolve the collection to obtain its partition-key definition and rid.
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request);

        return collectionObs
            .flatMap(documentCollectionResourceResponse -> {
                final DocumentCollection collection = documentCollectionResourceResponse.v;
                if (collection == null) {
                    return Mono.error(new IllegalStateException("Collection cannot be null"));
                }

                final PartitionKeyDefinition pkDefinition = collection.getPartitionKey();

                Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache
                    .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                        collection.getResourceId(),
                        null,
                        null);
                return valueHolderMono
                    .flatMap(collectionRoutingMapValueHolder -> {
                        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>();
                        CollectionRoutingMap routingMap =
                            collectionRoutingMapValueHolder.v;
                        if (routingMap == null) {
                            return Mono.error(new IllegalStateException("Failed to get routing map."));
                        }

                        // Bucket each requested item by the partition-key range that owns
                        // its effective partition key.
                        itemIdentityList
                            .forEach(itemIdentity -> {
                                // For hierarchical (MULTI_HASH) keys, readMany requires the
                                // full key path — partial keys are rejected.
                                if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) &&
                                    ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey())
                                        .getComponents().size() != pkDefinition.getPaths().size()) {
                                    throw new IllegalArgumentException(RMResources.PartitionKeyMismatch);
                                }
                                String effectivePartitionKeyString = PartitionKeyInternalHelper
                                    .getEffectivePartitionKeyString(
                                        BridgeInternal.getPartitionKeyInternal(
                                            itemIdentity.getPartitionKey()),
                                        pkDefinition);

                                PartitionKeyRange range =
                                    routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                                if (partitionRangeItemKeyMap.get(range) == null) {
                                    List<CosmosItemIdentity> list = new ArrayList<>();
                                    list.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, list);
                                } else {
                                    List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range);
                                    pairs.add(itemIdentity);
                                    partitionRangeItemKeyMap.put(range, pairs);
                                }
                            });

                        // Multi-item ranges get a SQL query; single-item ranges are read directly.
                        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap =
                            getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey());

                        Flux<FeedResponse<T>> pointReads = pointReadsForReadMany(
                            diagnosticsFactory,
                            partitionRangeItemKeyMap,
                            resourceLink,
                            state.getQueryOptions(),
                            klass);

                        Flux<FeedResponse<T>> queries = queryForReadMany(
                            diagnosticsFactory,
                            resourceLink,
                            new SqlQuerySpec(DUMMY_SQL_QUERY),
                            state.getQueryOptions(),
                            klass,
                            ResourceType.Document,
                            collection,
                            Collections.unmodifiableMap(rangeQueryMap));

                        // Merge all partial responses into one FeedResponse, aggregating
                        // query metrics, client-side statistics and request charge.
                        return Flux.merge(pointReads, queries)
                            .collectList()
                            .map(feedList -> {
                                List<T> finalList = new ArrayList<>();
                                HashMap<String, String> headers = new HashMap<>();
                                ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>();
                                Collection<ClientSideRequestStatistics> aggregateRequestStatistics =
                                    new DistinctClientSideRequestStatisticsCollection();
                                double requestCharge = 0;
                                for (FeedResponse<T> page :
                                        feedList) {
                                    ConcurrentMap<String, QueryMetrics> pageQueryMetrics =
                                        ModelBridgeInternal.queryMetrics(page);
                                    if (pageQueryMetrics != null) {
                                        pageQueryMetrics.forEach(
                                            aggregatedQueryMetrics::putIfAbsent);
                                    }
                                    requestCharge += page.getRequestCharge();
                                    finalList.addAll(page.getResults());
                                    aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics()));
                                }

                                CosmosDiagnostics aggregatedDiagnostics =
                                    BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics);
                                diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                                    aggregatedDiagnostics, aggregateRequestStatistics);

                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    // Record the aggregated operation as a 200 success.
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        200,
                                        0,
                                        finalList.size(),
                                        requestCharge,
                                        aggregatedDiagnostics,
                                        null
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            aggregatedDiagnostics,
                                            ctx);
                                }

                                headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double
                                    .toString(requestCharge));
                                FeedResponse<T> frp = BridgeInternal
                                    .createFeedResponseWithQueryMetrics(
                                        finalList,
                                        headers,
                                        aggregatedQueryMetrics,
                                        null,
                                        false,
                                        false,
                                        aggregatedDiagnostics);
                                return frp;
                            });
                    })
                    .onErrorMap(throwable -> {
                        // On failure, still record the operation outcome in the
                        // diagnostics context before propagating the CosmosException.
                        if (throwable instanceof CosmosException) {
                            CosmosException cosmosException = (CosmosException)throwable;
                            CosmosDiagnostics diagnostics = cosmosException.getDiagnostics();
                            if (diagnostics != null) {
                                state.mergeDiagnosticsContext();
                                CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot();
                                if (ctx != null) {
                                    ctxAccessor.recordOperation(
                                        ctx,
                                        cosmosException.getStatusCode(),
                                        cosmosException.getSubStatusCode(),
                                        0,
                                        cosmosException.getRequestCharge(),
                                        diagnostics,
                                        throwable
                                    );
                                    diagnosticsAccessor
                                        .setDiagnosticsContext(
                                            diagnostics,
                                            state.getDiagnosticsContextSnapshot());
                                }
                            }

                            return cosmosException;
                        }

                        return throwable;
                    });
                }
            );
    }

    /**
     * For each partition-key range holding more than one requested item, builds the
     * SQL query that fetches those items; single-item ranges are omitted (they are
     * served via point reads instead).
     */
    private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap(
        Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap,
        PartitionKeyDefinition partitionKeyDefinition) {

        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>();
        List<String> partitionKeySelectors = createPkSelectors(partitionKeyDefinition);

        for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) {
            SqlQuerySpec sqlQuerySpec;
            List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue();
            if (cosmosItemIdentityList.size() > 1) {
                // When the partition key IS the id, a simple "id IN (...)" query suffices.
                if (partitionKeySelectors.size() == 1 && partitionKeySelectors.get(0).equals("[\"id\"]")) {
                    sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(cosmosItemIdentityList);
                } else {
                    sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelectors);
                }
                rangeQueryMap.put(entry.getKey(), sqlQuerySpec);
            }
        }

        return rangeQueryMap;
    }

    /** Builds a parameterized "SELECT * FROM c WHERE c.id IN (...)" query (pk == id case). */
    private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<CosmosItemIdentity> idPartitionKeyPairList) {

        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( ");
        for (int i = 0; i < idPartitionKeyPairList.size(); i++) {
            CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i);

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + i;

            parameters.add(new SqlParameter(idParamName, idValue));
            queryStringBuilder.append(idParamName);

            if (i < idPartitionKeyPairList.size() - 1) {
                queryStringBuilder.append(", ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    /**
     * Builds a parameterized query matching each (id, partition-key) pair:
     * "(c.id = @p AND c[pkPath] = @q ...) OR ..." — one disjunct per item, with all
     * values passed as SQL parameters (never string-concatenated).
     */
    private SqlQuerySpec createReadManyQuerySpec(
        List<CosmosItemIdentity> itemIdentities,
        List<String> partitionKeySelectors) {

        StringBuilder queryStringBuilder = new StringBuilder();
        List<SqlParameter> parameters = new ArrayList<>();

        queryStringBuilder.append("SELECT * FROM c WHERE ( ");
        int paramCount = 0;
        for (int i = 0; i < itemIdentities.size(); i++) {
            CosmosItemIdentity itemIdentity = itemIdentities.get(i);

            PartitionKey pkValueAsPartitionKey =
                itemIdentity.getPartitionKey();
            Object[] pkValues = ModelBridgeInternal.getPartitionKeyInternal(pkValueAsPartitionKey).toObjectArray();
            List<List<String>> partitionKeyParams = new ArrayList<>();
            int pathCount = 0;
            // One (selector, parameter) pair per component of a (possibly hierarchical) key.
            for (Object pkComponentValue : pkValues) {
                String pkParamName = "@param" + paramCount;
                partitionKeyParams.add(Arrays.asList(partitionKeySelectors.get(pathCount), pkParamName));
                parameters.add(new SqlParameter(pkParamName, pkComponentValue));
                paramCount++;
                pathCount++;
            }

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + paramCount;
            paramCount++;
            parameters.add(new SqlParameter(idParamName, idValue));

            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);

            for (List<String> pkParam: partitionKeyParams) {
                queryStringBuilder.append(" AND ");
                queryStringBuilder.append(" c");
                queryStringBuilder.append(pkParam.get(0));
                queryStringBuilder.append((" = "));
                queryStringBuilder.append(pkParam.get(1));
            }
            queryStringBuilder.append(" )");

            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    /**
     * Converts partition-key paths like /a into bracketed selectors like ["a"].
     * NOTE(review): embedded quotes are replaced with a backslash, not escaped as \" —
     * looks suspicious for pk paths containing quotes; confirm against upstream behavior.
     */
    private List<String> createPkSelectors(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            .map(pathPart -> StringUtils.substring(pathPart, 1)) // skip the leading "/"
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\"))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.toList());
    }

    /**
     * Executes the per-range readMany queries; applies the end-to-end timeout policy
     * when one is configured and enabled.
     */
    private <T> Flux<FeedResponse<T>> queryForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DocumentCollection collection,
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

        if (rangeQueryMap.isEmpty()) {
            return Flux.empty();
        }

        UUID activityId = randomUuid();

        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
        IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

        // One execution context per target partition-key range / query pair.
        Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
                diagnosticsFactory,
                queryClient,
                collection.getResourceId(),
                sqlQuery,
                rangeQueryMap,
                options,
                collection.getResourceId(),
                parentResourceLink,
                activityId,
                klass,
                resourceTypeEnum,
                isQueryCancelledOnTimeout);

        Flux<FeedResponse<T>> feedResponseFlux =
            executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

        RequestOptions requestOptions = options == null? null
            : ImplementationBridgeHelpers
                .CosmosQueryRequestOptionsHelper
                .getCosmosQueryRequestOptionsAccessor()
                .toRequestOptions(options);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions, ResourceType.Document, OperationType.Query);
        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(
                feedResponseFlux,
                endToEndPolicyConfig,
                options,
                isQueryCancelledOnTimeout,
                diagnosticsFactory);
        }

        return feedResponseFlux;
    }

    /**
     * Serves single-item buckets of readMany as point reads, converting each result
     * (or a benign 404 with UNKNOWN sub-status, which yields an empty page) into a
     * one-item FeedResponse carrying the point-read diagnostics.
     */
    private <T> Flux<FeedResponse<T>> pointReadsForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
        String resourceLink,
        CosmosQueryRequestOptions queryRequestOptions,
        Class<T> klass) {

        CosmosItemSerializer effectiveItemSerializer = getEffectiveItemSerializer(queryRequestOptions);

        return Flux.fromIterable(singleItemPartitionRequestMap.values())
            .flatMap(cosmosItemIdentityList -> {
                // Only buckets of exactly one item are point-read; others were queried.
                if (cosmosItemIdentityList.size() == 1) {
                    CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                    RequestOptions requestOptions = ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .toRequestOptions(queryRequestOptions);
                    requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                    return
                        this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                            .flatMap(resourceResponse -> Mono.just(
                                new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                            ))
                            .onErrorResume(throwable -> {
                                // A plain 404 means "item not found" — readMany tolerates
                                // this and returns an empty page for that item.
                                Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                                if (unwrappedThrowable instanceof CosmosException) {
                                    CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                                    int statusCode = cosmosException.getStatusCode();
                                    int subStatusCode = cosmosException.getSubStatusCode();
                                    if (statusCode == HttpConstants.StatusCodes.NOTFOUND
                                        && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                        return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                                    }
                                }
                                return Mono.error(unwrappedThrowable);
                            });
                }
                return Mono.empty();
            })
            .flatMap(resourceResponseToExceptionPair -> {
                ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
                CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
                FeedResponse<T> feedResponse;
                if (cosmosException != null) {
                    // Tolerated 404 -> empty feed page, but keep its diagnostics.
                    feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
                } else {
                    CosmosItemResponse<T> cosmosItemResponse =
                        itemResponseAccessor.createCosmosItemResponse(resourceResponse, klass, effectiveItemSerializer);
                    feedResponse = ModelBridgeInternal.createFeedResponse(
                        Arrays.asList(cosmosItemResponse.getItem()),
                        cosmosItemResponse.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
                }

                return Mono.just(feedResponse);
            });
    }

    @Override
    public <T> Flux<FeedResponse<T>> queryDocuments(
        String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
        // String query overload simply wraps into a SqlQuerySpec.
        return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
    }

    /**
     * Serializer resolution order: per-request serializer, then the client-wide
     * custom default, then the SDK default serializer.
     */
    @Override
    public CosmosItemSerializer getEffectiveItemSerializer(CosmosItemSerializer requestOptionsItemSerializer) {

        if (requestOptionsItemSerializer != null) {
            return requestOptionsItemSerializer;
        }

        if (this.defaultCustomSerializer != null) {
            return this.defaultCustomSerializer;
        }

        return CosmosItemSerializer.DEFAULT_SERIALIZER;
    }

    private <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosQueryRequestOptions queryRequestOptions) {

        CosmosItemSerializer requestOptionsItemSerializer =
            queryRequestOptions != null ? queryRequestOptions.getCustomItemSerializer() : null;

        return this.getEffectiveItemSerializer(requestOptionsItemSerializer);
    }

    private <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosItemRequestOptions itemRequestOptions) {

        CosmosItemSerializer requestOptionsItemSerializer =
            itemRequestOptions != null ?
                itemRequestOptions.getCustomItemSerializer() : null;

        return this.getEffectiveItemSerializer(requestOptionsItemSerializer);
    }

    /**
     * Adapts this client to the IDocumentQueryClient interface consumed by the query
     * pipeline; when an operation listener tuple is supplied, query execution is
     * instrumented with request/response/exception callbacks.
     */
    private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) {

        return new IDocumentQueryClient () {

            @Override
            public RxCollectionCache getCollectionCache() {
                return RxDocumentClientImpl.this.collectionCache;
            }

            @Override
            public RxPartitionKeyRangeCache getPartitionKeyRangeCache() {
                return RxDocumentClientImpl.this.partitionKeyRangeCache;
            }

            @Override
            public IRetryPolicyFactory getResetSessionTokenRetryPolicy() {
                return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy;
            }

            @Override
            public ConsistencyLevel getDefaultConsistencyLevelAsync() {
                return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel();
            }

            @Override
            public ConsistencyLevel getDesiredConsistencyLevelAsync() {
                return RxDocumentClientImpl.this.consistencyLevel;
            }

            @Override
            public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) {
                if (operationContextAndListenerTuple == null) {
                    return RxDocumentClientImpl.this.query(request).single();
                } else {
                    // Instrumented path: propagate the correlation activity id and
                    // notify the listener around the actual query execution.
                    final OperationListener listener =
                        operationContextAndListenerTuple.getOperationListener();
                    final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext();
                    request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId());
                    listener.requestListener(operationContext, request);

                    return RxDocumentClientImpl.this.query(request).single().doOnNext(
                        response -> listener.responseListener(operationContext, response)
                    ).doOnError(
                        ex -> listener.exceptionListener(operationContext, ex)
                    );
                }
            }

            @Override
            public QueryCompatibilityMode getQueryCompatibilityMode() {
                return QueryCompatibilityMode.Default;
            }

            @Override
            public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
                ResourceType resourceType,
                OperationType operationType,
                Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
                RxDocumentServiceRequest req,
                BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) {

                return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy(
                    resourceType,
                    operationType,
                    retryPolicyFactory,
                    req,
                    feedOperation
                );
            }

            @Override
            public <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosQueryRequestOptions queryRequestOptions) {
                return RxDocumentClientImpl.this.getEffectiveItemSerializer(queryRequestOptions);
            }

            // NOTE(review): intentionally unimplemented here — returns null; callers in
            // the query pipeline apparently never invoke it through this adapter. Confirm.
            @Override
            public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) {
                return null;
            }
        };
    }

    @Override
    public <T> Flux<FeedResponse<T>> queryDocuments(
        String collectionLink,
        SqlQuerySpec querySpec,
        QueryFeedOperationState state,
        Class<T> classOfT) {
        SqlQuerySpecLogger.getInstance().logQuery(querySpec);
        return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document);
    }

    /** Change-feed query against a resolved collection. */
    @Override
    public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed(
        final DocumentCollection collection,
        final CosmosChangeFeedRequestOptions changeFeedOptions,
        Class<T> classOfT) {

        checkNotNull(collection, "Argument 'collection' must not be null.");

        ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>(
            this,
            ResourceType.Document,
            classOfT,
            collection.getAltLink(),
            collection.getResourceId(),
            changeFeedOptions);

        return changeFeedQueryImpl.executeAsync();
    }

    @Override
    public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) {
        return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT);
    }

    /**
     * Reads all documents of a single logical partition by scoping a scan query to
     * the partition-key range owning the given partition key.
     */
    @Override
    public <T> Flux<FeedResponse<T>> readAllDocuments(
        String collectionLink,
        PartitionKey partitionKey,
        QueryFeedOperationState state,
        Class<T> classOfT) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        if (partitionKey
                == null) {
            throw new IllegalArgumentException("partitionKey");
        }

        // Clone the options so per-operation mutations (pk range id) stay local.
        final CosmosQueryRequestOptions effectiveOptions = qryOptAccessor.clone(state.getQueryOptions());

        RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions);
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig();
        List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            ResourceType.Document,
            OperationType.Query,
            false,
            nonNullRequestOptions);

        ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            // No hedging — diagnostics factory never needs to be reset.
            state.registerDiagnosticsFactory(
                () -> {},
                (ctx) -> diagnosticsFactory.merge(ctx));
        } else {
            state.registerDiagnosticsFactory(
                () -> diagnosticsFactory.reset(),
                (ctx) -> diagnosticsFactory.merge(ctx));
        }

        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            diagnosticsFactory,
            OperationType.Query,
            ResourceType.Document,
            collectionLink,
            null
        );

        Flux<Utils.ValueHolder<DocumentCollection>> collectionObs =
            collectionCache.resolveCollectionAsync(null, request).flux();

        return collectionObs.flatMap(documentCollectionResourceResponse -> {
            DocumentCollection collection = documentCollectionResourceResponse.v;
            if (collection == null) {
                return Mono.error(new IllegalStateException("Collection cannot be null"));
            }

            PartitionKeyDefinition pkDefinition = collection.getPartitionKey();
            List<String> partitionKeySelectors = createPkSelectors(pkDefinition);
            SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, partitionKeySelectors);

            String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document);
            UUID activityId = randomUuid();

            final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);

            IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this,
                getOperationContextAndListenerTuple(state.getQueryOptions()));

            // Retries on "invalid partition" (e.g. after a split) by refreshing the
            // collection cache and re-running the routing-map lookup below.
            InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy(
                this.collectionCache,
                null,
                resourceLink,
                ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions));

            Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs(
                () -> {
                    Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache
                        .tryLookupAsync(
                            BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics),
                            collection.getResourceId(),
                            null,
                            null).flux();

                    return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> {
                        CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v;
                        if (routingMap == null) {
                            return Mono.error(new IllegalStateException("Failed to get routing map."));
                        }

                        // Pin the query to the single range owning this partition key.
                        String effectivePartitionKeyString = PartitionKeyInternalHelper
                            .getEffectivePartitionKeyString(
                                BridgeInternal.getPartitionKeyInternal(partitionKey),
                                pkDefinition);

                        PartitionKeyRange range =
                            routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString);

                        return createQueryInternal(
                            diagnosticsFactory,
                            resourceLink,
                            querySpec,
                            ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()),
                            classOfT,
                            ResourceType.Document,
                            queryClient,
                            activityId,
                            isQueryCancelledOnTimeout);
                    });
                },
                invalidPartitionExceptionRetryPolicy);

            if (orderedApplicableRegionsForSpeculation.size() < 2) {
                return innerFlux;
            }

            // With hedging, merge scoped diagnostics on every terminal signal.
            return innerFlux
                .flatMap(result -> {
                    diagnosticsFactory.merge(nonNullRequestOptions);
                    return Mono.just(result);
                })
                .onErrorMap(throwable -> {
                    diagnosticsFactory.merge(nonNullRequestOptions);
                    return throwable;
                })
                .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
        });
    }

    @Override
    public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() {
        return queryPlanCache;
    }

    /** Reads the partition-key-range feed of a collection (state-based overload). */
    @Override
    public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String
            collectionLink, QueryFeedOperationState state) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
            Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
    }

    /** Reads the partition-key-range feed of a collection (options-based overload). */
    @Override
    public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) {

        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }

        return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class,
            Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT));
    }

    /** Builds a StoredProcedure service request after validating link and resource. */
    private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure,
                                                               RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (storedProcedure == null) {
            throw new IllegalArgumentException("storedProcedure");
        }

        validateResource(storedProcedure);

        String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType);
        return RxDocumentServiceRequest.create(this, operationType,
            ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);
    }

    /** Builds a UserDefinedFunction service request after validating link and resource. */
    private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf,
                                                                   RequestOptions options, OperationType operationType) {
        if (StringUtils.isEmpty(collectionLink)) {
            throw new IllegalArgumentException("collectionLink");
        }
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }

        validateResource(udf);

        String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction,
            operationType);
        return RxDocumentServiceRequest.create(this, operationType,
            ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
    }

    @Override
    public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink,
                                                                         StoredProcedure storedProcedure, RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink,
                                                                                  StoredProcedure storedProcedure, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
        try {

            logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]",
                collectionLink, storedProcedure.getId());
            RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options,
                OperationType.Create);
            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))
                .map(response -> toResourceResponse(response, StoredProcedure.class));

        } catch (Exception e) {
            logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure,
                                                                          RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options,
                                                                                   DocumentClientRetryPolicy retryPolicyInstance) {
        try {

            if (storedProcedure == null) {
                throw new IllegalArgumentException("storedProcedure");
            }
            logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId());

            RxDocumentClientImpl.validateResource(storedProcedure);

            String path = Utils.joinPath(storedProcedure.getSelfLink(), null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure,
                OperationType.Replace);
            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options);

            if (retryPolicyInstance != null) {
                retryPolicyInstance.onBeforeSendRequest(request);
            }

            return this.replace(request, retryPolicyInstance)
                .map(response -> toResourceResponse(response, StoredProcedure.class));

        } catch (Exception e) {
            logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    @Override
    public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink,
                                                                         RequestOptions options) {
        DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy),
            requestRetryPolicy);
    }

    // (method continues beyond this chunk)
    private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options,
                                                                                  DocumentClientRetryPolicy retryPolicyInstance) {
        try {

            if (StringUtils.isEmpty(storedProcedureLink)) {
                throw new IllegalArgumentException("storedProcedureLink");
            }

            logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, QueryFeedOperationState state) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, 
documentClientRetryPolicy), documentClientRetryPolicy);
    }

    /**
     * Executes a transactional batch against a collection.
     *
     * @param collectionLink self-link of the target collection
     * @param serverBatchRequest the already-assembled server batch payload
     * @param options request options applied to the batch request
     * @param disableAutomaticIdGeneration forwarded to batch request assembly
     * @return a Mono emitting the parsed {@link CosmosBatchResponse}
     */
    @Override
    public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink,
                                                         ServerBatchRequest serverBatchRequest,
                                                         RequestOptions options,
                                                         boolean disableAutomaticIdGeneration) {
        DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(
            () -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration),
            documentClientRetryPolicy);
    }

    /**
     * Builds and dispatches the ExecuteJavaScript request for a stored procedure.
     *
     * <p>Synchronous failures while assembling the request are converted into an error Mono so
     * callers only observe failures through the reactive pipeline.
     *
     * @param storedProcedureLink self-link of the stored procedure to execute
     * @param options request options (may be null)
     * @param procedureParams parameters serialized into the request body; null/empty yields an empty body
     * @param retryPolicy retry policy notified before the request is sent (may be null)
     * @return a Mono emitting the stored procedure response
     */
    private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink,
                                                                         RequestOptions options,
                                                                         List<Object> procedureParams,
                                                                         DocumentClientRetryPolicy retryPolicy) {
        try {
            logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink);
            String path = Utils.joinPath(storedProcedureLink, null);
            Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure,
                OperationType.ExecuteJavaScript);
            // Sproc execution returns plain JSON, not a resource payload.
            requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);

            RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
                OperationType.ExecuteJavaScript,
                ResourceType.StoredProcedure,
                path,
                procedureParams != null && !procedureParams.isEmpty()
                    ? RxDocumentClientImpl.serializeProcedureParams(procedureParams)
                    : "",
                requestHeaders,
                options);

            if (options != null) {
                request.requestContext.setExcludeRegions(options.getExcludeRegions());
            }

            if (retryPolicy != null) {
                retryPolicy.onBeforeSendRequest(request);
            }

            Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options);
            // FIX: use the request instance emitted by addPartitionKeyInformation ("req") instead of
            // the captured outer "request", so the request that is dispatched (and the one used for
            // session-token capture) is guaranteed to be the fully-prepared instance.
            return reqObs.flatMap(req ->
                create(req, retryPolicy, getOperationContextAndListenerTuple(options))
                    .map(response -> {
                        this.captureSessionToken(req, response);
                        return toStoredProcedureResponse(response);
                    }));
        } catch (Exception e) {
            logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e);
            return Mono.error(e);
        }
    }

    /**
     * Sends a previously assembled batch request and parses the service response.
     *
     * @return a Mono emitting the parsed {@link CosmosBatchResponse}
     */
    private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink,
                                                                  ServerBatchRequest serverBatchRequest,
                                                                  RequestOptions options,
                                                                  DocumentClientRetryPolicy requestRetryPolicy,
                                                                  boolean disableAutomaticIdGeneration) {
        try {
            logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size());

            Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration);
            Mono<RxDocumentServiceResponse> responseObservable =
                requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options)));

            return responseObservable
                .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true));
        } catch (Exception ex) {
            logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex);
            return Mono.error(ex);
        }
    }

    @Override
    public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) {
        DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
        return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, 
trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy 
retryPolicyInstance) { try { if (trigger == null) { throw new IllegalArgumentException("trigger"); } logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId()); RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(trigger.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Deleting a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(triggerLink)) { throw new IllegalArgumentException("triggerLink"); } logger.debug("Reading a Trigger. 
triggerLink [{}]", triggerLink); String path = Utils.joinPath(triggerLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Trigger, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class, Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, QueryFeedOperationState state) { return queryTriggers(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger); } @Override public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction 
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId()); RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (udf == null) { throw new IllegalArgumentException("udf"); } logger.debug("Replacing a UserDefinedFunction. 
udf id [{}]", udf.getId()); validateResource(udf); String path = Utils.joinPath(udf.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Deleting a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(udfLink)) { throw new IllegalArgumentException("udfLink"); } logger.debug("Reading a UserDefinedFunction. 
udfLink [{}]", udfLink); String path = Utils.joinPath(udfLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class)); } catch (Exception e) { logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class, Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions( String collectionLink, String query, QueryFeedOperationState state) { return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions( String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction); } @Override public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private 
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, QueryFeedOperationState state) { return queryConflicts(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { 
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. 
user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Deletes a User resource. Public entry point: wraps the internal call with a fresh
// session-token-reset retry policy so retries and the request share one policy instance.
public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Delete request for a User. Argument-validation failures are
// surfaced as Mono.error rather than thrown synchronously (the try/catch converts them).
private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Reads a single User resource by its link.
@Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Read request for a User; mirrors deleteUserInternal.
private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Reads the feed of Users under a database. Note: unlike the *Internal methods above,
// the empty-link check here throws synchronously to the caller.
@Override public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); }
// Raw-SQL-string convenience overload; delegates to the SqlQuerySpec overload.
@Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) { return queryUsers(databaseLink, new SqlQuerySpec(query), state); }
// Queries Users via the shared createQuery pipeline.
@Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User); }
// Reads a ClientEncryptionKey resource by its link.
@Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Read request for a ClientEncryptionKey.
private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if 
(StringUtils.isEmpty(clientEncryptionKeyLink)) { throw new IllegalArgumentException("clientEncryptionKeyLink"); } logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink); String path = Utils.joinPath(clientEncryptionKeyLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Creates a ClientEncryptionKey under the given database.
@Override public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance); }
// Issues the Create request; request construction/validation lives in getClientEncryptionKeyRequest.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId()); RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Validates arguments and builds the service request for a ClientEncryptionKey operation.
// Throws IllegalArgumentException synchronously for null/empty inputs (callers wrap in try/catch).
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); }
// Replaces an existing ClientEncryptionKey addressed by a name-based link.
@Override public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Replace request for a ClientEncryptionKey.
// NOTE(review): nameBasedLink is not checked for null/empty here, unlike sibling methods — verify intended.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey, String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Reads the feed of ClientEncryptionKeys under a database.
@Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys( String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); }
// Queries ClientEncryptionKeys via the shared createQuery pipeline.
@Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys( String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); }
// Creates a Permission under the given User.
@Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> 
createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null)); }
// NOTE(review): createPermission (above) passes a *second, fresh* retry policy instance to
// inlineIfPossibleAsObs while the callback uses documentClientRetryPolicy — every sibling
// method reuses the same instance for both. Likely unintended; verify before changing.
// Issues the Create request for a Permission; request construction is in getPermissionRequest.
private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Upserts (create-or-replace) a Permission under the given User.
@Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Upsert request for a Permission.
private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Validates arguments and builds the service request for a Permission operation.
private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); }
// Replaces an existing Permission, addressed by its self-link.
@Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Replace request for a Permission.
private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Deletes a Permission by its link.
@Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Delete request for a Permission.
private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Reads a single Permission by its link.
@Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Read request for a Permission.
private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Reads the feed of Permissions under a User.
@Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); }
// Raw-SQL-string convenience overload; delegates to the SqlQuerySpec overload.
@Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, QueryFeedOperationState state) { return queryPermissions(userLink, new SqlQuerySpec(query), state); }
// Queries Permissions via the shared createQuery pipeline.
@Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission); }
// Replaces an Offer (throughput resource), addressed by its self-link.
@Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); }
// Builds and issues the Replace request for an Offer. Offers take no per-request
// headers/options here (nulls passed to create).
private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Reads a single Offer by its link.
@Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); }
// Builds and issues the Read request for an Offer.
private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Reads the feed of all Offers for the account.
@Override public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); }
// ReadFeed for non-document resources; this overload unwraps the operation state.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( QueryFeedOperationState state, ResourceType resourceType, Class<T> klass, String resourceLink) { return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink); }
// ReadFeed for non-document resources, wrapped with a session-token-reset retry policy.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy), retryPolicy); }
// Paginated ReadFeed driver: builds per-page requests carrying the continuation token
// and page size, and maps raw responses to typed FeedResponse pages.
private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink, DocumentClientRetryPolicy retryPolicy) { final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions(); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions); int maxPageSize = maxItemCount != null ? 
maxItemCount : -1; assert(resourceType != ResourceType.Document); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> readFeed(request) .map(response -> feedResponseAccessor.createFeedResponse( response, CosmosItemSerializer.DEFAULT_SERIALIZER, klass)); return Paginator .getPaginatedQueryResultAsObservable( nonNullOptions, createRequestFunc, executeFunc, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) { return queryOffers(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, 
documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public ISessionContainer getSession() { return this.sessionContainer; } public void setSession(ISessionContainer sessionContainer) { this.sessionContainer = sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } @Override public GlobalEndpointManager getGlobalEndpointManager() { return this.globalEndpointManager; } @Override public AddressSelector getAddressSelector() { return new AddressSelector(this.addressResolver, this.configs.getProtocol()); } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. 
* * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.useGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", 
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) { this.storeModel.enableThroughputControl(throughputControlStore); } else { this.gatewayProxy.enableThroughputControl(throughputControlStore); } } this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono); } @Override public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) { return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig); } @Override public ConsistencyLevel getDefaultConsistencyLevelOfAccount() { return this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } /*** * Configure fault injector provider. * * @param injectorProvider the fault injector provider. 
*/ @Override public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) { checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null"); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) { this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs); this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs); } this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs); } @Override public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities); } @Override public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities); } @Override public String getMasterKeyOrResourceToken() { return this.masterKeyOrResourceToken; } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, List<String> partitionKeySelectors) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object[] pkValues = ModelBridgeInternal.getPartitionKeyInternal(partitionKey).toObjectArray(); String pkParamNamePrefix = "@pkValue"; for (int i = 0; i < pkValues.length; i++) { StringBuilder subQueryStringBuilder = new StringBuilder(); String sqlParameterName = pkParamNamePrefix + i; if (i > 0) { subQueryStringBuilder.append(" AND "); } subQueryStringBuilder.append(" c"); subQueryStringBuilder.append(partitionKeySelectors.get(i)); subQueryStringBuilder.append((" = ")); subQueryStringBuilder.append(sqlParameterName); parameters.add(new SqlParameter(sqlParameterName, pkValues[i])); queryStringBuilder.append(subQueryStringBuilder); } return new 
SqlQuerySpec(queryStringBuilder.toString(), parameters); }
// Resolves the physical feed ranges (one per partition key range) for a collection.
// Wrapped with an InvalidPartitionException retry policy so stale name-cache entries
// trigger a refresh-and-retry instead of failing.
@Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink, forceRefresh), invalidPartitionExceptionRetryPolicy); }
// Resolves the collection, then fetches all overlapping partition key ranges for the
// full key space and converts them to feed ranges.
private Mono<List<FeedRange>> getFeedRangesInternal( RxDocumentServiceRequest request, String collectionLink, boolean forceRefresh) { logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, forceRefresh, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); }
// Maps PK ranges to feed ranges; a null list means the name cache is stale, so force a
// refresh and throw InvalidPartitionException to drive the retry policy.
private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; }
private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); }
/** * Creates a type 4 (pseudo randomly generated) UUID. * <p> * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator. * * @return A randomly generated {@link UUID}. */
public static UUID randomUuid() { return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); }
// Sets the RFC 4122 version (4) and variant (IETF) bits on the supplied random halves.
static UUID randomUuid(long msb, long lsb) { msb &= 0xffffffffffff0fffL; msb |= 0x0000000000004000L; lsb &= 0x3fffffffffffffffL; lsb |= 0x8000000000000000L; return new UUID(msb, lsb); }
// Convenience overload: uses this client as the diagnostics factory.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled) { return wrapPointOperationWithAvailabilityStrategy( resourceType, operationType, callback, initialRequestOptions, idempotentWriteRetriesEnabled, this ); }
// Applies the threshold-based availability (hedging) strategy to a document point
// operation: when more than one region is applicable, the operation is raced across
// regions with staggered delays and the first non-transient result wins.
private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled, DiagnosticsClientContext innerDiagnosticsFactory) { checkNotNull(resourceType, "Argument 'resourceType' must not be null."); checkNotNull(operationType, "Argument 'operationType' must not be null."); checkNotNull(callback, "Argument 'callback' must not be null."); final RequestOptions nonNullRequestOptions = initialRequestOptions != null ? 
initialRequestOptions : new RequestOptions();

        checkArgument(
            resourceType == ResourceType.Document,
            "This method can only be used for document point operations.");

        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions, resourceType, operationType);

        List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            resourceType,
            operationType,
            idempotentWriteRetriesEnabled,
            nonNullRequestOptions);

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            // Hedging needs at least two candidate regions; otherwise run the plain operation.
            return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory);
        }

        ThresholdBasedAvailabilityStrategy availabilityStrategy =
            (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
        List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>();

        // Scoped factory so diagnostics from all speculative attempts can be merged into
        // the winning result's diagnostics context.
        final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false);

        orderedApplicableRegionsForSpeculation
            .forEach(region -> {
                RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions);

                if (monoList.isEmpty()) {
                    // First mono: normal cross-region retries apply; ANY CosmosException is
                    // treated as a candidate terminal result here.
                    Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions =
                        callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                                .map(NonTransientPointOperationResult::new)
                                .onErrorResume(
                                    RxDocumentClientImpl::isCosmosException,
                                    t -> Mono.just(
                                        new NonTransientPointOperationResult(
                                            Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    if (logger.isDebugEnabled()) {
                        monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                            "STARTING to process {} operation in region '{}'", operationType, region)));
                    } else {
                        monoList.add(initialMonoAcrossAllRegions);
                    }
                } else {
                    // Hedged attempts: pin each one to a single region by excluding all the
                    // other applicable regions.
                    clonedOptions.setExcludeRegions(
                        getEffectiveExcludedRegionsForHedging(
                            nonNullRequestOptions.getExcludeRegions(),
                            orderedApplicableRegionsForSpeculation,
                            region)
                    );

                    // Only NON-transient errors terminate a hedged attempt; transient ones
                    // leave the attempt without a value so another region can win.
                    Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono =
                        callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory)
                                .map(NonTransientPointOperationResult::new)
                                .onErrorResume(
                                    RxDocumentClientImpl::isNonTransientCosmosException,
                                    t -> Mono.just(
                                        new NonTransientPointOperationResult(
                                            Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    // Stagger hedged subscriptions: threshold + (attemptIndex - 1) * thresholdStep.
                    Duration delayForCrossRegionalRetry = (availabilityStrategy)
                        .getThreshold()
                        .plus((availabilityStrategy)
                            .getThresholdStep()
                            .multipliedBy(monoList.size() - 1));

                    if (logger.isDebugEnabled()) {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                                .delaySubscription(delayForCrossRegionalRetry));
                    } else {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .delaySubscription(delayForCrossRegionalRetry));
                    }
                }
            });

        // First mono emitting a value wins; merge diagnostics at every terminal path
        // (success, error, cancellation) so nothing is lost.
        return Mono
            .firstWithValue(monoList)
            .flatMap(nonTransientResult -> {
                diagnosticsFactory.merge(nonNullRequestOptions);
                if (nonTransientResult.isError()) {
                    return Mono.error(nonTransientResult.exception);
                }

                return Mono.just(nonTransientResult.response);
            })
            .onErrorMap(throwable -> {
                Throwable exception = Exceptions.unwrap(throwable);

                // firstWithValue signals NoSuchElementException when no source emitted a
                // value; unwrap the composite to find the first real CosmosException.
                if (exception instanceof NoSuchElementException) {
                    List<Throwable> innerThrowables = Exceptions
                        .unwrapMultiple(exception.getCause());

                    int index = 0;
                    for (Throwable innerThrowable : innerThrowables) {
                        Throwable innerException = Exceptions.unwrap(innerThrowable);

                        if (innerException instanceof CosmosException) {
                            CosmosException cosmosException = Utils.as(innerException, CosmosException.class);
                            diagnosticsFactory.merge(nonNullRequestOptions);
                            return cosmosException;
                        } else if (innerException instanceof NoSuchElementException) {
                            logger.trace(
                                "Operation in {} completed with empty result because it was cancelled.",
                                orderedApplicableRegionsForSpeculation.get(index));
                        } else if (logger.isWarnEnabled()) {
                            String message = "Unexpected Non-CosmosException when processing operation in '"
                                + orderedApplicableRegionsForSpeculation.get(index)
                                + "'.";
                            logger.warn(
                                message,
                                innerException
                            );
                        }

                        index++;
                    }
                }

                diagnosticsFactory.merge(nonNullRequestOptions);
                return exception;
            })
            .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions));
    }

    // True when the unwrapped throwable is a CosmosException.
    private static boolean isCosmosException(Throwable t) {
        final Throwable unwrappedException = Exceptions.unwrap(t);
        return unwrappedException instanceof CosmosException;
    }

    // True when the unwrapped throwable is a CosmosException whose status/sub-status
    // marks it as non-transient for hedging purposes (see isNonTransientResultForHedging).
    private static boolean isNonTransientCosmosException(Throwable t) {
        final Throwable unwrappedException = Exceptions.unwrap(t);
        if (!(unwrappedException instanceof CosmosException)) {
            return false;
        }
        CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class);
        return isNonTransientResultForHedging(
            cosmosException.getStatusCode(),
            cosmosException.getSubStatusCode());
    }

    // Exclude-region list that pins a hedged attempt to currentRegion: the caller's
    // exclusions plus every other applicable region.
    private List<String> getEffectiveExcludedRegionsForHedging(
        List<String> initialExcludedRegions,
        List<String> applicableRegions,
        String currentRegion) {

        List<String> effectiveExcludedRegions = new ArrayList<>();
        if (initialExcludedRegions != null) {
            effectiveExcludedRegions.addAll(initialExcludedRegions);
        }

        for (String applicableRegion: applicableRegions) {
            if (!applicableRegion.equals(currentRegion)) {
                effectiveExcludedRegions.add(applicableRegion);
            }
        }

        return effectiveExcludedRegions;
    }

    // Classifies a status/sub-status pair: non-transient results (successes and
    // deterministic client errors) end hedging; everything else lets other regions win.
    private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) {
        // Successes (and redirects) are terminal.
        if (statusCode < HttpConstants.StatusCodes.BADREQUEST) {
            return true;
        }

        // 408 with client-operation-timeout means the end-to-end timeout fired - terminal.
        if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT &&
            subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) {

            return true;
        }

        // Deterministic client-side errors - retrying in another region cannot help.
        if (statusCode == HttpConstants.StatusCodes.BADREQUEST
            || statusCode == HttpConstants.StatusCodes.CONFLICT
            || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED
            || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED
            || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE
            || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) {

            return true;
        }

        // Plain 404 (sub-status UNKNOWN) is a definite not-found - terminal.
        if (statusCode == HttpConstants.StatusCodes.NOTFOUND &&
subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
            return true;
        }

        return false;
    }

    // Returns the override when provided, otherwise this client itself.
    private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) {
        if (clientContextOverride != null) {
            return clientContextOverride;
        }

        return this;
    }

    /**
     * Returns the applicable endpoints ordered by preference list if any.
     *
     * @param operationType the operation type (read-only vs. write decides the endpoint set)
     * @param excludedRegions regions to exclude from the returned endpoints; may be null
     * @return the applicable endpoints ordered by preference list if any
     */
    private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) {
        if (operationType.isReadOnlyOperation()) {
            return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions));
        } else if (operationType.isWriteOperation()) {
            return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions));
        }

        return EMPTY_ENDPOINT_LIST;
    }

    // Removes null entries from the list in place (index only advances on non-null entries).
    private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) {
        if (orderedEffectiveEndpointsList == null) {
            return EMPTY_ENDPOINT_LIST;
        }

        int i = 0;
        while (i < orderedEffectiveEndpointsList.size()) {
            if (orderedEffectiveEndpointsList.get(i) == null) {
                orderedEffectiveEndpointsList.remove(i);
            } else {
                i++;
            }
        }

        return orderedEffectiveEndpointsList;
    }

    // Convenience overload taking RequestOptions (delegates with its exclude-region list).
    private List<String> getApplicableRegionsForSpeculation(
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        ResourceType resourceType,
        OperationType operationType,
        boolean isIdempotentWriteRetriesEnabled,
        RequestOptions options) {

        return getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            resourceType,
            operationType,
            isIdempotentWriteRetriesEnabled,
            options.getExcludeRegions());
    }

    /**
     * Computes the ordered region names eligible for speculative (hedged) execution.
     * Returns an empty list whenever hedging must not be used: policy disabled, non-document
     * resource, non-idempotent or single-master writes, or a non-threshold-based strategy.
     */
    private List<String> getApplicableRegionsForSpeculation(
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig,
        ResourceType resourceType,
        OperationType operationType,
        boolean isIdempotentWriteRetriesEnabled,
        List<String> excludedRegions) {

        if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) {
            return EMPTY_REGION_LIST;
}

        if (resourceType != ResourceType.Document) {
            return EMPTY_REGION_LIST;
        }

        // Writes are only hedged when retries are known to be idempotent...
        if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) {
            return EMPTY_REGION_LIST;
        }

        // ...and only against multi-write accounts.
        if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) {
            return EMPTY_REGION_LIST;
        }

        if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) {
            return EMPTY_REGION_LIST;
        }

        List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions);

        // Exclusion check is case-insensitive - normalize to lower case once.
        HashSet<String> normalizedExcludedRegions = new HashSet<>();
        if (excludedRegions != null) {
            excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT)));
        }

        List<String> orderedRegionsForSpeculation = new ArrayList<>();
        endpoints.forEach(uri -> {
            String regionName = this.globalEndpointManager.getRegionName(uri, operationType);
            if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) {
                orderedRegionsForSpeculation.add(regionName);
            }
        });

        return orderedRegionsForSpeculation;
    }

    /**
     * Executes a feed/query operation with the threshold-based availability strategy:
     * the first attempt runs normally; additional attempts are subscribed after a
     * staggered delay, each pinned to one remaining applicable region. The first
     * non-transient result wins.
     *
     * @param <T> the feed operation's result type.
     * @param resourceType must be {@link ResourceType#Document} (asserted).
     * @param operationType the feed operation type.
     * @param retryPolicyFactory supplies a fresh retry policy per attempt.
     * @param req the service request; cloned per hedged attempt.
     * @param feedOperation the actual operation to run per attempt.
     * @return a {@link Mono} emitting the first non-transient result across regions.
     */
    private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy(
        final ResourceType resourceType,
        final OperationType operationType,
        final Supplier<DocumentClientRetryPolicy> retryPolicyFactory,
        final RxDocumentServiceRequest req,
        final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation
    ) {
        checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null.");
        checkNotNull(req, "Argument 'req' must not be null.");
        assert(resourceType == ResourceType.Document);

        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            this.getEffectiveEndToEndOperationLatencyPolicyConfig(
                req.requestContext.getEndToEndOperationLatencyPolicyConfig(),
                resourceType,
                operationType);

        List<String> initialExcludedRegions = req.requestContext.getExcludeRegions();
        List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation(
            endToEndPolicyConfig,
            resourceType,
            operationType,
            false,
            initialExcludedRegions
        );

        if (orderedApplicableRegionsForSpeculation.size() < 2) {
            // Hedging not possible/applicable - run the operation normally.
            return feedOperation.apply(retryPolicyFactory, req);
        }

        ThresholdBasedAvailabilityStrategy availabilityStrategy =
            (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy();
        List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>();

        orderedApplicableRegionsForSpeculation
            .forEach(region -> {
                RxDocumentServiceRequest clonedRequest = req.clone();

                if (monoList.isEmpty()) {
                    // First attempt: any CosmosException terminates it.
                    Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions =
                        feedOperation.apply(retryPolicyFactory, clonedRequest)
                                     .map(NonTransientFeedOperationResult::new)
                                     .onErrorResume(
                                         RxDocumentClientImpl::isCosmosException,
                                         t -> Mono.just(
                                             new NonTransientFeedOperationResult<>(
                                                 Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    if (logger.isDebugEnabled()) {
                        monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug(
                            "STARTING to process {} operation in region '{}'", operationType, region)));
                    } else {
                        monoList.add(initialMonoAcrossAllRegions);
                    }
                } else {
                    // Hedged attempts: pin to one region and only accept non-transient errors.
                    clonedRequest.requestContext.setExcludeRegions(
                        getEffectiveExcludedRegionsForHedging(
                            initialExcludedRegions,
                            orderedApplicableRegionsForSpeculation,
                            region)
                    );

                    Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono =
                        feedOperation.apply(retryPolicyFactory, clonedRequest)
                                     .map(NonTransientFeedOperationResult::new)
                                     .onErrorResume(
                                         RxDocumentClientImpl::isNonTransientCosmosException,
                                         t -> Mono.just(
                                             new NonTransientFeedOperationResult<>(
                                                 Utils.as(Exceptions.unwrap(t), CosmosException.class))));

                    // Stagger hedged subscriptions: threshold + (attemptIndex - 1) * thresholdStep.
                    Duration delayForCrossRegionalRetry = (availabilityStrategy)
                        .getThreshold()
                        .plus((availabilityStrategy)
                            .getThresholdStep()
                            .multipliedBy(monoList.size() - 1));

                    if (logger.isDebugEnabled()) {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region))
                                .delaySubscription(delayForCrossRegionalRetry));
                    } else {
                        monoList.add(
                            regionalCrossRegionRetryMono
                                .delaySubscription(delayForCrossRegionalRetry));
                    }
                }
            });

        // First mono emitting a value wins.
        return Mono
            .firstWithValue(monoList)
            .flatMap(nonTransientResult -> {

                if (nonTransientResult.isError()) {
                    return Mono.error(nonTransientResult.exception);
                }

                return Mono.just(nonTransientResult.response);
            })
            .onErrorMap(throwable -> {
                Throwable exception = Exceptions.unwrap(throwable);

                // firstWithValue signals NoSuchElementException when no source emitted a
                // value; unwrap the composite to find the first real CosmosException.
                if (exception instanceof NoSuchElementException) {
                    List<Throwable> innerThrowables = Exceptions
                        .unwrapMultiple(exception.getCause());

                    int index = 0;
                    for (Throwable innerThrowable : innerThrowables) {
                        Throwable innerException = Exceptions.unwrap(innerThrowable);

                        if (innerException instanceof CosmosException) {
                            return Utils.as(innerException, CosmosException.class);
                        } else if (innerException instanceof NoSuchElementException) {
                            logger.trace(
                                "Operation in {} completed with empty result because it was cancelled.",
                                orderedApplicableRegionsForSpeculation.get(index));
                        } else if (logger.isWarnEnabled()) {
                            String message = "Unexpected Non-CosmosException when processing operation in '"
                                + orderedApplicableRegionsForSpeculation.get(index)
                                + "'.";
                            logger.warn(
                                message,
                                innerException
                            );
                        }

                        index++;
                    }
                }

                return exception;
            });
    }

    // Point-operation callback: one attempt with the given options/diagnostics context.
    @FunctionalInterface
    private interface DocumentPointOperation {
        Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride);
    }

    // Outcome of one point-operation attempt: exactly one of response/exception is non-null.
    private static class NonTransientPointOperationResult {
        private final ResourceResponse<Document> response;
        private final CosmosException exception;

        public NonTransientPointOperationResult(CosmosException exception) {
            checkNotNull(exception, "Argument 'exception' must not be null.");
            this.exception = exception;
            this.response = null;
        }

        public NonTransientPointOperationResult(ResourceResponse<Document> response) {
            checkNotNull(response, "Argument 'response' must not be null.");
            this.exception = null;
            this.response = response;
        }

        public boolean isError() {
            return this.exception != null;
        }

        public CosmosException getException() {
            return this.exception;
        }

        public ResourceResponse<Document> getResponse() {
            return this.response;
        }
    }

    // Outcome of one feed-operation attempt: exactly one of response/exception is non-null.
    private static class NonTransientFeedOperationResult<T> {
        private final T response;
        private final CosmosException exception;

        public NonTransientFeedOperationResult(CosmosException exception) {
            checkNotNull(exception, "Argument 'exception' must not be null.");
            this.exception = exception;
            this.response = null;
        }

        public NonTransientFeedOperationResult(T response) {
            checkNotNull(response, "Argument 'response' must not be null.");
            this.exception = null;
            this.response = response;
        }

        public boolean isError() {
            return this.exception != null;
        }

        public CosmosException getException() {
            return this.exception;
        }

        public T getResponse() {
            return this.response;
        }
    }

    // Collects every CosmosDiagnostics created during hedged attempts so they can later be
    // merged (once) into the winning attempt's diagnostics context.
    private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext {

        private final AtomicBoolean isMerged = new AtomicBoolean(false);
        private final DiagnosticsClientContext inner;
        private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics;
        private final boolean shouldCaptureAllFeedDiagnostics;
        private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null);

        public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) {
            checkNotNull(inner, "Argument 'inner' must not be null.");
            this.inner = inner;
            this.createdDiagnostics = new ConcurrentLinkedQueue<>();
            this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics;
        }

        @Override
        public DiagnosticsClientConfig getConfig() {
            return inner.getConfig();
        }

        @Override
        public CosmosDiagnostics createDiagnostics() {
            // Delegate creation but remember every diagnostics instance for later merging.
            CosmosDiagnostics diagnostics = inner.createDiagnostics();
            createdDiagnostics.add(diagnostics);
            mostRecentlyCreatedDiagnostics.set(diagnostics);
            return diagnostics;
        }

        @Override
public String getUserAgent() {
            return inner.getUserAgent();
        }

        @Override
        public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() {
            return this.mostRecentlyCreatedDiagnostics.get();
        }

        // Merges using the diagnostics-context snapshot from the request options when present.
        public void merge(RequestOptions requestOptions) {
            CosmosDiagnosticsContext knownCtx = null;

            if (requestOptions != null) {
                CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot();
                if (ctxSnapshot != null) {
                    knownCtx = requestOptions.getDiagnosticsContextSnapshot();
                }
            }

            merge(knownCtx);
        }

        // One-shot merge: attaches all collected context-less diagnostics to a single
        // diagnostics context (the given one, or the first one found among the collected
        // diagnostics). compareAndSet guarantees the merge runs at most once per scope.
        public void merge(CosmosDiagnosticsContext knownCtx) {
            if (!isMerged.compareAndSet(false, true)) {
                return;
            }

            CosmosDiagnosticsContext ctx = null;

            if (knownCtx != null) {
                ctx = knownCtx;
            } else {
                // Fall back to the first collected diagnostics that already has a context.
                for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                    if (diagnostics.getDiagnosticsContext() != null) {
                        ctx = diagnostics.getDiagnosticsContext();
                        break;
                    }
                }
            }

            if (ctx == null) {
                // No context anywhere - nothing to attach the diagnostics to.
                return;
            }

            for (CosmosDiagnostics diagnostics : this.createdDiagnostics) {
                // Only merge non-empty diagnostics that are not yet bound to a context.
                if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) {
                    if (this.shouldCaptureAllFeedDiagnostics
                        && diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) {

                        // Mark feed diagnostics as captured so the paged flux does not
                        // surface them a second time.
                        AtomicBoolean isCaptured =
                            diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics);
                        if (isCaptured != null) {
                            isCaptured.set(true);
                        }
                    }
                    ctxAccessor.addDiagnostics(ctx, diagnostics);
                }
            }
        }

        // Clears collected diagnostics and re-arms the one-shot merge gate.
        public void reset() {
            this.createdDiagnostics.clear();
            this.isMerged.set(false);
        }
    }
}
class RxDocumentClientImpl implements AsyncDocumentClient, IAuthorizationTokenProvider, CpuMemoryListener, DiagnosticsClientContext { private final static List<String> EMPTY_REGION_LIST = Collections.emptyList(); private final static List<URI> EMPTY_ENDPOINT_LIST = Collections.emptyList(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnosticsAccessor diagnosticsAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsHelper.getCosmosDiagnosticsAccessor(); private final static ImplementationBridgeHelpers.FeedResponseHelper.FeedResponseAccessor feedResponseAccessor = ImplementationBridgeHelpers.FeedResponseHelper.getFeedResponseAccessor(); private final static ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.CosmosClientTelemetryConfigAccessor telemetryCfgAccessor = ImplementationBridgeHelpers.CosmosClientTelemetryConfigHelper.getCosmosClientTelemetryConfigAccessor(); private final static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor = ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); private final static ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.CosmosQueryRequestOptionsAccessor qryOptAccessor = ImplementationBridgeHelpers.CosmosQueryRequestOptionsHelper.getCosmosQueryRequestOptionsAccessor(); private final static ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor itemResponseAccessor = ImplementationBridgeHelpers.CosmosItemResponseHelper.getCosmosItemResponseBuilderAccessor(); private static final String tempMachineId = "uuid:" + UUID.randomUUID(); private static final AtomicInteger activeClientsCnt = new AtomicInteger(0); private static final Map<String, Integer> clientMap = new ConcurrentHashMap<>(); private static final AtomicInteger clientIdGenerator = new AtomicInteger(0); private static final Range<String> RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES = new 
Range<>( PartitionKeyInternalHelper.MinimumInclusiveEffectivePartitionKey, PartitionKeyInternalHelper.MaximumExclusiveEffectivePartitionKey, true, false); private static final String DUMMY_SQL_QUERY = "this is dummy and only used in creating " + "ParallelDocumentQueryExecutioncontext, but not used"; private final static ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final CosmosItemSerializer defaultCustomSerializer; private final static Logger logger = LoggerFactory.getLogger(RxDocumentClientImpl.class); private final String masterKeyOrResourceToken; private final URI serviceEndpoint; private final ConnectionPolicy connectionPolicy; private final ConsistencyLevel consistencyLevel; private final BaseAuthorizationTokenProvider authorizationTokenProvider; private final UserAgentContainer userAgentContainer; private final boolean hasAuthKeyResourceToken; private final Configs configs; private final boolean connectionSharingAcrossClientsEnabled; private AzureKeyCredential credential; private final TokenCredential tokenCredential; private String[] tokenCredentialScopes; private SimpleTokenCache tokenCredentialCache; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; AuthorizationTokenType authorizationTokenType; private ISessionContainer sessionContainer; private String firstResourceTokenFromPermissionFeed = StringUtils.EMPTY; private RxClientCollectionCache collectionCache; private RxGatewayStoreModel gatewayProxy; private RxStoreModel storeModel; private GlobalAddressResolver addressResolver; private RxPartitionKeyRangeCache partitionKeyRangeCache; private Map<String, List<PartitionKeyAndResourceTokenPair>> resourceTokensMap; private final boolean contentResponseOnWriteEnabled; private final Map<String, PartitionedQueryExecutionInfo> queryPlanCache; private final AtomicBoolean closed = new AtomicBoolean(false); private final int clientId; private ClientTelemetry clientTelemetry; private final ApiType apiType; private final 
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); private IRetryPolicyFactory resetSessionTokenRetryPolicy; /** * Compatibility mode: Allows to specify compatibility mode used by client when * making query requests. Should be removed when application/sql is no longer * supported. */ private final QueryCompatibilityMode queryCompatibilityMode = QueryCompatibilityMode.Default; private final GlobalEndpointManager globalEndpointManager; private final RetryPolicy retryPolicy; private HttpClient reactorHttpClient; private Function<HttpClient, HttpClient> httpClientInterceptor; private volatile boolean useMultipleWriteLocations; private StoreClientFactory storeClientFactory; private GatewayServiceConfigurationReader gatewayConfigurationReader; private final DiagnosticsClientConfig diagnosticsClientConfig; private final AtomicBoolean throughputControlEnabled; private ThroughputControlStore throughputControlStore; private final CosmosClientTelemetryConfig clientTelemetryConfig; private final String clientCorrelationId; private final SessionRetryOptions sessionRetryOptions; private final boolean sessionCapturingOverrideEnabled; private final boolean sessionCapturingDisabled; private final boolean isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig; public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, 
CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, null, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } public RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverride, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, permissionFeed, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverride, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, 
clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); this.cosmosAuthorizationTokenResolver = cosmosAuthorizationTokenResolver; } private RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, List<Permission> permissionFeed, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { this( serviceEndpoint, masterKeyOrResourceToken, connectionPolicy, consistencyLevel, configs, credential, tokenCredential, sessionCapturingOverrideEnabled, connectionSharingAcrossClientsEnabled, contentResponseOnWriteEnabled, metadataCachesSnapshot, apiType, clientTelemetryConfig, clientCorrelationId, cosmosEndToEndOperationLatencyPolicyConfig, sessionRetryOptions, containerProactiveInitConfig, defaultCustomSerializer, isRegionScopedSessionCapturingEnabled); if (permissionFeed != null && permissionFeed.size() > 0) { this.resourceTokensMap = new HashMap<>(); for (Permission permission : permissionFeed) { String[] segments = StringUtils.split(permission.getResourceLink(), Constants.Properties.PATH_SEPARATOR.charAt(0)); if (segments.length == 0) { throw new IllegalArgumentException("resourceLink"); } List<PartitionKeyAndResourceTokenPair> partitionKeyAndResourceTokenPairs 
= null; PathInfo pathInfo = new PathInfo(false, StringUtils.EMPTY, StringUtils.EMPTY, false); if (!PathsHelper.tryParsePathSegments(permission.getResourceLink(), pathInfo, null)) { throw new IllegalArgumentException(permission.getResourceLink()); } partitionKeyAndResourceTokenPairs = resourceTokensMap.get(pathInfo.resourceIdOrFullName); if (partitionKeyAndResourceTokenPairs == null) { partitionKeyAndResourceTokenPairs = new ArrayList<>(); this.resourceTokensMap.put(pathInfo.resourceIdOrFullName, partitionKeyAndResourceTokenPairs); } PartitionKey partitionKey = permission.getResourcePartitionKey(); partitionKeyAndResourceTokenPairs.add(new PartitionKeyAndResourceTokenPair( partitionKey != null ? BridgeInternal.getPartitionKeyInternal(partitionKey) : PartitionKeyInternal.Empty, permission.getToken())); logger.debug("Initializing resource token map , with map key [{}] , partition key [{}] and resource token [{}]", pathInfo.resourceIdOrFullName, partitionKey != null ? partitionKey.toString() : null, permission.getToken()); } if(this.resourceTokensMap.isEmpty()) { throw new IllegalArgumentException("permissionFeed"); } String firstToken = permissionFeed.get(0).getToken(); if(ResourceTokenAuthorizationHelper.isResourceToken(firstToken)) { this.firstResourceTokenFromPermissionFeed = firstToken; } } } RxDocumentClientImpl(URI serviceEndpoint, String masterKeyOrResourceToken, ConnectionPolicy connectionPolicy, ConsistencyLevel consistencyLevel, Configs configs, AzureKeyCredential credential, TokenCredential tokenCredential, boolean sessionCapturingOverrideEnabled, boolean connectionSharingAcrossClientsEnabled, boolean contentResponseOnWriteEnabled, CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, ApiType apiType, CosmosClientTelemetryConfig clientTelemetryConfig, String clientCorrelationId, CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig, SessionRetryOptions sessionRetryOptions, CosmosContainerProactiveInitConfig 
containerProactiveInitConfig, CosmosItemSerializer defaultCustomSerializer, boolean isRegionScopedSessionCapturingEnabled) { assert(clientTelemetryConfig != null); Boolean clientTelemetryEnabled = ImplementationBridgeHelpers .CosmosClientTelemetryConfigHelper .getCosmosClientTelemetryConfigAccessor() .isSendClientTelemetryToServiceEnabled(clientTelemetryConfig); assert(clientTelemetryEnabled != null); activeClientsCnt.incrementAndGet(); this.clientId = clientIdGenerator.incrementAndGet(); this.clientCorrelationId = Strings.isNullOrWhiteSpace(clientCorrelationId) ? String.format("%05d",this.clientId): clientCorrelationId; clientMap.put(serviceEndpoint.toString(), clientMap.getOrDefault(serviceEndpoint.toString(), 0) + 1); this.diagnosticsClientConfig = new DiagnosticsClientConfig(); this.diagnosticsClientConfig.withClientId(this.clientId); this.diagnosticsClientConfig.withActiveClientCounter(activeClientsCnt); this.diagnosticsClientConfig.withClientMap(clientMap); this.diagnosticsClientConfig.withConnectionSharingAcrossClientsEnabled(connectionSharingAcrossClientsEnabled); this.diagnosticsClientConfig.withConsistency(consistencyLevel); this.throughputControlEnabled = new AtomicBoolean(false); this.cosmosEndToEndOperationLatencyPolicyConfig = cosmosEndToEndOperationLatencyPolicyConfig; this.diagnosticsClientConfig.withEndToEndOperationLatencyPolicy(cosmosEndToEndOperationLatencyPolicyConfig); this.sessionRetryOptions = sessionRetryOptions; this.defaultCustomSerializer = defaultCustomSerializer; logger.info( "Initializing DocumentClient [{}] with" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]", this.clientId, serviceEndpoint, connectionPolicy, consistencyLevel, configs.getProtocol()); try { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; this.configs = configs; this.masterKeyOrResourceToken = masterKeyOrResourceToken; this.serviceEndpoint = serviceEndpoint; this.credential = credential; this.tokenCredential 
= tokenCredential; this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; this.authorizationTokenType = AuthorizationTokenType.Invalid; if (this.credential != null) { hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else if (masterKeyOrResourceToken != null && ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.authorizationTokenProvider = null; hasAuthKeyResourceToken = true; this.authorizationTokenType = AuthorizationTokenType.ResourceToken; } else if(masterKeyOrResourceToken != null && !ResourceTokenAuthorizationHelper.isResourceToken(masterKeyOrResourceToken)) { this.credential = new AzureKeyCredential(this.masterKeyOrResourceToken); hasAuthKeyResourceToken = false; this.authorizationTokenType = AuthorizationTokenType.PrimaryMasterKey; this.authorizationTokenProvider = new BaseAuthorizationTokenProvider(this.credential); } else { hasAuthKeyResourceToken = false; this.authorizationTokenProvider = null; if (tokenCredential != null) { this.tokenCredentialScopes = new String[] { serviceEndpoint.getScheme() + ": }; this.tokenCredentialCache = new SimpleTokenCache(() -> this.tokenCredential .getToken(new TokenRequestContext().addScopes(this.tokenCredentialScopes))); this.authorizationTokenType = AuthorizationTokenType.AadToken; } } if (connectionPolicy != null) { this.connectionPolicy = connectionPolicy; } else { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); } this.diagnosticsClientConfig.withConnectionMode(this.getConnectionPolicy().getConnectionMode()); this.diagnosticsClientConfig.withConnectionPolicy(this.connectionPolicy); this.diagnosticsClientConfig.withMultipleWriteRegionsEnabled(this.connectionPolicy.isMultipleWriteRegionsEnabled()); 
this.diagnosticsClientConfig.withEndpointDiscoveryEnabled(this.connectionPolicy.isEndpointDiscoveryEnabled()); this.diagnosticsClientConfig.withPreferredRegions(this.connectionPolicy.getPreferredRegions()); this.diagnosticsClientConfig.withMachineId(tempMachineId); this.diagnosticsClientConfig.withProactiveContainerInitConfig(containerProactiveInitConfig); this.diagnosticsClientConfig.withSessionRetryOptions(sessionRetryOptions); this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; boolean disableSessionCapturing = (ConsistencyLevel.SESSION != consistencyLevel && !sessionCapturingOverrideEnabled); this.sessionCapturingDisabled = disableSessionCapturing; this.consistencyLevel = consistencyLevel; this.userAgentContainer = new UserAgentContainer(); String userAgentSuffix = this.connectionPolicy.getUserAgentSuffix(); if (userAgentSuffix != null && userAgentSuffix.length() > 0) { userAgentContainer.setSuffix(userAgentSuffix); } this.httpClientInterceptor = null; this.reactorHttpClient = httpClient(); this.globalEndpointManager = new GlobalEndpointManager(asDatabaseAccountManagerInternal(), this.connectionPolicy, /**/configs); this.isRegionScopedSessionCapturingEnabledOnClientOrSystemConfig = isRegionScopedSessionCapturingEnabled; this.sessionContainer = new SessionContainer(this.serviceEndpoint.getHost(), disableSessionCapturing); this.retryPolicy = new RetryPolicy(this, this.globalEndpointManager, this.connectionPolicy); this.resetSessionTokenRetryPolicy = retryPolicy; CpuMemoryMonitor.register(this); this.queryPlanCache = new ConcurrentHashMap<>(); this.apiType = apiType; this.clientTelemetryConfig = clientTelemetryConfig; } catch (RuntimeException e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } @Override public DiagnosticsClientConfig getConfig() { return diagnosticsClientConfig; } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = diagnosticsAccessor.create(this, 
telemetryCfgAccessor.getSamplingRate(this.clientTelemetryConfig)); this.mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } private DatabaseAccount initializeGatewayConfigurationReader() { this.gatewayConfigurationReader = new GatewayServiceConfigurationReader(this.globalEndpointManager); DatabaseAccount databaseAccount = this.globalEndpointManager.getLatestDatabaseAccount(); if (databaseAccount == null) { Throwable databaseRefreshErrorSnapshot = this.globalEndpointManager.getLatestDatabaseRefreshError(); if (databaseRefreshErrorSnapshot != null) { logger.error("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot ); throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. More info: https: databaseRefreshErrorSnapshot); } else { logger.error("Client initialization failed." + " Check if the endpoint is reachable and if your auth token is valid. More info: https: throw new RuntimeException("Client initialization failed. Check if the endpoint is reachable and if your auth token " + "is valid. 
More info: https: } } this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount); return databaseAccount; } private boolean isRegionScopingOfSessionTokensPossible(DatabaseAccount databaseAccount, boolean useMultipleWriteLocations, boolean isRegionScopedSessionCapturingEnabled) { if (!isRegionScopedSessionCapturingEnabled) { return false; } if (!useMultipleWriteLocations) { return false; } Iterable<DatabaseAccountLocation> readableLocationsIterable = databaseAccount.getReadableLocations(); Iterator<DatabaseAccountLocation> readableLocationsIterator = readableLocationsIterable.iterator(); while (readableLocationsIterator.hasNext()) { DatabaseAccountLocation readableLocation = readableLocationsIterator.next(); String normalizedReadableRegion = readableLocation.getName().toLowerCase(Locale.ROOT).trim().replace(" ", ""); if (RegionNameToRegionIdMap.getRegionId(normalizedReadableRegion) == -1) { return false; } } return true; } private void updateGatewayProxy() { (this.gatewayProxy).setGatewayServiceConfigurationReader(this.gatewayConfigurationReader); (this.gatewayProxy).setCollectionCache(this.collectionCache); (this.gatewayProxy).setPartitionKeyRangeCache(this.partitionKeyRangeCache); (this.gatewayProxy).setUseMultipleWriteLocations(this.useMultipleWriteLocations); (this.gatewayProxy).setSessionContainer(this.sessionContainer); } public void init(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot, Function<HttpClient, HttpClient> httpClientInterceptor) { try { this.httpClientInterceptor = httpClientInterceptor; if (httpClientInterceptor != null) { this.reactorHttpClient = httpClientInterceptor.apply(httpClient()); } this.gatewayProxy = createRxGatewayProxy(this.sessionContainer, this.consistencyLevel, this.queryCompatibilityMode, this.userAgentContainer, this.globalEndpointManager, this.reactorHttpClient, this.apiType); this.globalEndpointManager.init(); 
DatabaseAccount databaseAccountSnapshot = this.initializeGatewayConfigurationReader(); this.resetSessionContainerIfNeeded(databaseAccountSnapshot); if (metadataCachesSnapshot != null) { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy, metadataCachesSnapshot.getCollectionInfoByNameCache(), metadataCachesSnapshot.getCollectionInfoByIdCache() ); } else { this.collectionCache = new RxClientCollectionCache(this, this.sessionContainer, this.gatewayProxy, this, this.retryPolicy); } this.resetSessionTokenRetryPolicy = new ResetSessionTokenRetryPolicyFactory(this.sessionContainer, this.collectionCache, this.retryPolicy); this.partitionKeyRangeCache = new RxPartitionKeyRangeCache(RxDocumentClientImpl.this, collectionCache); updateGatewayProxy(); clientTelemetry = new ClientTelemetry( this, null, randomUuid().toString(), ManagementFactory.getRuntimeMXBean().getName(), connectionPolicy.getConnectionMode(), globalEndpointManager.getLatestDatabaseAccount().getId(), null, null, this.configs, this.clientTelemetryConfig, this, this.connectionPolicy.getPreferredRegions()); clientTelemetry.init().thenEmpty((publisher) -> { logger.warn( "Initialized DocumentClient [{}] with machineId[{}]" + " serviceEndpoint [{}], connectionPolicy [{}], consistencyLevel [{}]", clientId, ClientTelemetry.getMachineId(diagnosticsClientConfig), serviceEndpoint, connectionPolicy, consistencyLevel); }).subscribe(); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.GATEWAY) { this.storeModel = this.gatewayProxy; } else { this.initializeDirectConnectivity(); } this.retryPolicy.setRxCollectionCache(this.collectionCache); ConsistencyLevel effectiveConsistencyLevel = consistencyLevel != null ? 
consistencyLevel : this.getDefaultConsistencyLevelOfAccount(); boolean updatedDisableSessionCapturing = (ConsistencyLevel.SESSION != effectiveConsistencyLevel && !sessionCapturingOverrideEnabled); this.sessionContainer.setDisableSessionCapturing(updatedDisableSessionCapturing); } catch (Exception e) { logger.error("unexpected failure in initializing client.", e); close(); throw e; } } public void serialize(CosmosClientMetadataCachesSnapshot state) { RxCollectionCache.serialize(state, this.collectionCache); } private void initializeDirectConnectivity() { this.addressResolver = new GlobalAddressResolver(this, this.reactorHttpClient, this.globalEndpointManager, this.configs.getProtocol(), this, this.collectionCache, this.partitionKeyRangeCache, userAgentContainer, null, this.connectionPolicy, this.apiType); this.storeClientFactory = new StoreClientFactory( this.addressResolver, this.diagnosticsClientConfig, this.configs, this.connectionPolicy, this.userAgentContainer, this.connectionSharingAcrossClientsEnabled, this.clientTelemetry, this.globalEndpointManager); this.createStoreModel(true); } DatabaseAccountManagerInternal asDatabaseAccountManagerInternal() { return new DatabaseAccountManagerInternal() { @Override public URI getServiceEndpoint() { return RxDocumentClientImpl.this.getServiceEndpoint(); } @Override public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { logger.info("Getting database account endpoint from {}", endpoint); return RxDocumentClientImpl.this.getDatabaseAccountFromEndpoint(endpoint); } @Override public ConnectionPolicy getConnectionPolicy() { return RxDocumentClientImpl.this.getConnectionPolicy(); } }; } RxGatewayStoreModel createRxGatewayProxy(ISessionContainer sessionContainer, ConsistencyLevel consistencyLevel, QueryCompatibilityMode queryCompatibilityMode, UserAgentContainer userAgentContainer, GlobalEndpointManager globalEndpointManager, HttpClient httpClient, ApiType apiType) { return new RxGatewayStoreModel( this, 
sessionContainer, consistencyLevel, queryCompatibilityMode, userAgentContainer, globalEndpointManager, httpClient, apiType); } private HttpClient httpClient() { HttpClientConfig httpClientConfig = new HttpClientConfig(this.configs) .withMaxIdleConnectionTimeout(this.connectionPolicy.getIdleHttpConnectionTimeout()) .withPoolSize(this.connectionPolicy.getMaxConnectionPoolSize()) .withProxy(this.connectionPolicy.getProxy()) .withNetworkRequestTimeout(this.connectionPolicy.getHttpNetworkRequestTimeout()); if (connectionSharingAcrossClientsEnabled) { return SharedGatewayHttpClient.getOrCreateInstance(httpClientConfig, diagnosticsClientConfig); } else { diagnosticsClientConfig.withGatewayHttpClientConfig(httpClientConfig.toDiagnosticsString()); return HttpClient.createFixed(httpClientConfig); } } private void createStoreModel(boolean subscribeRntbdStatus) { StoreClient storeClient = this.storeClientFactory.createStoreClient(this, this.addressResolver, this.sessionContainer, this.gatewayConfigurationReader, this, this.useMultipleWriteLocations, this.sessionRetryOptions); this.storeModel = new ServerStoreModel(storeClient); } @Override public URI getServiceEndpoint() { return this.serviceEndpoint; } @Override public ConnectionPolicy getConnectionPolicy() { return this.connectionPolicy; } @Override public boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } @Override public ConsistencyLevel getConsistencyLevel() { return consistencyLevel; } @Override public ClientTelemetry getClientTelemetry() { return this.clientTelemetry; } @Override public String getClientCorrelationId() { return this.clientCorrelationId; } @Override public String getMachineId() { if (this.diagnosticsClientConfig == null) { return null; } return ClientTelemetry.getMachineId(diagnosticsClientConfig); } @Override public String getUserAgent() { return this.userAgentContainer.getUserAgent(); } @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return 
mostRecentlyCreatedDiagnostics.get(); } @Override public Mono<ResourceResponse<Database>> createDatabase(Database database, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createDatabaseInternal(database, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> createDatabaseInternal(Database database, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (database == null) { throw new IllegalArgumentException("Database"); } logger.debug("Creating a Database. id: [{}]", database.getId()); validateResource(database); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = database.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.DATABASE_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.Database, Paths.DATABASES_ROOT, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(response -> 
toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in creating a database. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> deleteDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> deleteDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Deleting a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in deleting a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Database>> readDatabase(String databaseLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readDatabaseInternal(databaseLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Database>> readDatabaseInternal(String databaseLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } logger.debug("Reading a Database. databaseLink: [{}]", databaseLink); String path = Utils.joinPath(databaseLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Database, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Database, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Database.class)); } catch (Exception e) { logger.debug("Failure in reading a database. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Database>> readDatabases(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Database, Database.class, Paths.DATABASES_ROOT); } private String parentResourceLinkToQueryLink(String parentResourceLink, ResourceType resourceTypeEnum) { switch (resourceTypeEnum) { case Database: return Paths.DATABASES_ROOT; case DocumentCollection: return Utils.joinPath(parentResourceLink, Paths.COLLECTIONS_PATH_SEGMENT); case Document: return Utils.joinPath(parentResourceLink, Paths.DOCUMENTS_PATH_SEGMENT); case Offer: return Paths.OFFERS_ROOT; case User: return Utils.joinPath(parentResourceLink, Paths.USERS_PATH_SEGMENT); case ClientEncryptionKey: return Utils.joinPath(parentResourceLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT); case Permission: return Utils.joinPath(parentResourceLink, Paths.PERMISSIONS_PATH_SEGMENT); case Attachment: return Utils.joinPath(parentResourceLink, Paths.ATTACHMENTS_PATH_SEGMENT); case StoredProcedure: return Utils.joinPath(parentResourceLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); case Trigger: return Utils.joinPath(parentResourceLink, Paths.TRIGGERS_PATH_SEGMENT); case UserDefinedFunction: return Utils.joinPath(parentResourceLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); case Conflict: return Utils.joinPath(parentResourceLink, Paths.CONFLICTS_PATH_SEGMENT); default: throw new IllegalArgumentException("resource type not supported"); } } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(CosmosQueryRequestOptions options) { if (options == null) { return null; } return qryOptAccessor.getImpl(options).getOperationContextAndListenerTuple(); } private OperationContextAndListenerTuple getOperationContextAndListenerTuple(RequestOptions options) { if (options == null) { return null; } return options.getOperationContextAndListenerTuple(); } private <T> Flux<FeedResponse<T>> createQuery( String 
parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum) { return createQuery(parentResourceLink, sqlQuery, state, klass, resourceTypeEnum, this); } private <T> Flux<FeedResponse<T>> createQuery( String parentResourceLink, SqlQuerySpec sqlQuery, QueryFeedOperationState state, Class<T> klass, ResourceType resourceTypeEnum, DiagnosticsClientContext innerDiagnosticsFactory) { String resourceLink = parentResourceLinkToQueryLink(parentResourceLink, resourceTypeEnum); CosmosQueryRequestOptions nonNullQueryOptions = state.getQueryOptions(); UUID correlationActivityIdOfRequestOptions = qryOptAccessor .getImpl(nonNullQueryOptions) .getCorrelationActivityId(); UUID correlationActivityId = correlationActivityIdOfRequestOptions != null ? correlationActivityIdOfRequestOptions : randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(nonNullQueryOptions)); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(nonNullQueryOptions)); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); state.registerDiagnosticsFactory( diagnosticsFactory::reset, diagnosticsFactory::merge); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> createQueryInternal( diagnosticsFactory, resourceLink, sqlQuery, state.getQueryOptions(), klass, resourceTypeEnum, queryClient, correlationActivityId, isQueryCancelledOnTimeout), invalidPartitionExceptionRetryPolicy ).flatMap(result -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); return Mono.just(result); }) .onErrorMap(throwable -> { diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot()); 
return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(state.getDiagnosticsContextSnapshot())); } private <T> Flux<FeedResponse<T>> createQueryInternal( DiagnosticsClientContext diagnosticsClientContext, String resourceLink, SqlQuerySpec sqlQuery, CosmosQueryRequestOptions options, Class<T> klass, ResourceType resourceTypeEnum, IDocumentQueryClient queryClient, UUID activityId, final AtomicBoolean isQueryCancelledOnTimeout) { Flux<? extends IDocumentQueryExecutionContext<T>> executionContext = DocumentQueryExecutionContextFactory .createDocumentQueryExecutionContextAsync(diagnosticsClientContext, queryClient, resourceTypeEnum, klass, sqlQuery, options, resourceLink, false, activityId, Configs.isQueryPlanCachingEnabled(), queryPlanCache, isQueryCancelledOnTimeout); AtomicBoolean isFirstResponse = new AtomicBoolean(true); return executionContext.flatMap(iDocumentQueryExecutionContext -> { QueryInfo queryInfo = null; if (iDocumentQueryExecutionContext instanceof PipelinedQueryExecutionContextBase) { queryInfo = ((PipelinedQueryExecutionContextBase<T>) iDocumentQueryExecutionContext).getQueryInfo(); } QueryInfo finalQueryInfo = queryInfo; Flux<FeedResponse<T>> feedResponseFlux = iDocumentQueryExecutionContext.executeAsync() .map(tFeedResponse -> { if (finalQueryInfo != null) { if (finalQueryInfo.hasSelectValue()) { ModelBridgeInternal .addQueryInfoToFeedResponse(tFeedResponse, finalQueryInfo); } if (isFirstResponse.compareAndSet(true, false)) { ModelBridgeInternal.addQueryPlanDiagnosticsContextToFeedResponse(tFeedResponse, finalQueryInfo.getQueryPlanDiagnosticsContext()); } } return tFeedResponse; }); RequestOptions requestOptions = options == null? 
null : ImplementationBridgeHelpers .CosmosQueryRequestOptionsHelper .getCosmosQueryRequestOptionsAccessor() .toRequestOptions(options); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(requestOptions, resourceTypeEnum, OperationType.Query); if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) { return getFeedResponseFluxWithTimeout( feedResponseFlux, endToEndPolicyConfig, options, isQueryCancelledOnTimeout, diagnosticsClientContext); } return feedResponseFlux; }, Queues.SMALL_BUFFER_SIZE, 1); } private static void applyExceptionToMergedDiagnosticsForQuery( CosmosQueryRequestOptions requestOptions, CosmosException exception, DiagnosticsClientContext diagnosticsClientContext) { CosmosDiagnostics mostRecentlyCreatedDiagnostics = diagnosticsClientContext.getMostRecentlyCreatedDiagnostics(); if (mostRecentlyCreatedDiagnostics != null) { BridgeInternal.setCosmosDiagnostics( exception, mostRecentlyCreatedDiagnostics); } else { List<CosmosDiagnostics> cancelledRequestDiagnostics = qryOptAccessor .getCancelledRequestDiagnosticsTracker(requestOptions); if (cancelledRequestDiagnostics != null && !cancelledRequestDiagnostics.isEmpty()) { CosmosDiagnostics aggregratedCosmosDiagnostics = cancelledRequestDiagnostics .stream() .reduce((first, toBeMerged) -> { ClientSideRequestStatistics clientSideRequestStatistics = ImplementationBridgeHelpers .CosmosDiagnosticsHelper .getCosmosDiagnosticsAccessor() .getClientSideRequestStatisticsRaw(first); ClientSideRequestStatistics toBeMergedClientSideRequestStatistics = ImplementationBridgeHelpers .CosmosDiagnosticsHelper .getCosmosDiagnosticsAccessor() .getClientSideRequestStatisticsRaw(first); if (clientSideRequestStatistics == null) { return toBeMerged; } else { clientSideRequestStatistics.mergeClientSideRequestStatistics(toBeMergedClientSideRequestStatistics); return first; } }) .get(); BridgeInternal.setCosmosDiagnostics(exception, aggregratedCosmosDiagnostics); } 
} } private static <T> Flux<FeedResponse<T>> getFeedResponseFluxWithTimeout( Flux<FeedResponse<T>> feedResponseFlux, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, CosmosQueryRequestOptions requestOptions, final AtomicBoolean isQueryCancelledOnTimeout, DiagnosticsClientContext diagnosticsClientContext) { Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout(); if (endToEndTimeout.isNegative()) { return feedResponseFlux .timeout(endToEndTimeout) .onErrorMap(throwable -> { if (throwable instanceof TimeoutException) { CosmosException cancellationException = getNegativeTimeoutException(null, endToEndTimeout); cancellationException.setStackTrace(throwable.getStackTrace()); isQueryCancelledOnTimeout.set(true); applyExceptionToMergedDiagnosticsForQuery( requestOptions, cancellationException, diagnosticsClientContext); return cancellationException; } return throwable; }); } return feedResponseFlux .timeout(endToEndTimeout) .onErrorMap(throwable -> { if (throwable instanceof TimeoutException) { CosmosException exception = new OperationCancelledException(); exception.setStackTrace(throwable.getStackTrace()); isQueryCancelledOnTimeout.set(true); applyExceptionToMergedDiagnosticsForQuery(requestOptions, exception, diagnosticsClientContext); return exception; } return throwable; }); } @Override public Flux<FeedResponse<Database>> queryDatabases(String query, QueryFeedOperationState state) { return queryDatabases(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Database>> queryDatabases(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(Paths.DATABASES_ROOT, querySpec, state, Database.class, ResourceType.Database); } @Override public Mono<ResourceResponse<DocumentCollection>> createCollection(String databaseLink, DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return 
ObservableHelper.inlineIfPossibleAsObs(() -> this.createCollectionInternal(databaseLink, collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> createCollectionInternal(String databaseLink, DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Creating a Collection. databaseLink: [{}], Collection id: [{}]", databaseLink, collection.getId()); validateResource(collection); String path = Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Create); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = collection.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Create, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.create(request, retryPolicyInstance, 
getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { this.sessionContainer.setSessionToken( request, resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); }); } catch (Exception e) { logger.debug("Failure in creating a collection. due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> replaceCollection(DocumentCollection collection, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceCollectionInternal(collection, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> replaceCollectionInternal(DocumentCollection collection, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (collection == null) { throw new IllegalArgumentException("collection"); } logger.debug("Replacing a Collection. 
id: [{}]", collection.getId()); validateResource(collection); String path = Utils.joinPath(collection.getSelfLink(), null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer byteBuffer = collection.serializeJsonToByteBuffer(CosmosItemSerializer.DEFAULT_SERIALIZER, null); Instant serializationEndTimeUTC = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTimeUTC, SerializationDiagnosticsContext.SerializationType.CONTAINER_SERIALIZATION); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.DocumentCollection, path, byteBuffer, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)) .doOnNext(resourceResponse -> { if (resourceResponse.getResource() != null) { this.sessionContainer.setSessionToken( request, resourceResponse.getResource().getResourceId(), getAltLink(resourceResponse.getResource()), resourceResponse.getResponseHeaders()); } }); } catch (Exception e) { logger.debug("Failure in replacing a collection. 
due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<DocumentCollection>> deleteCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> deleteCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in deleting a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<RxDocumentServiceResponse> delete(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.DELETE) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } 
return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> deleteAllItemsByPartitionKey(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> read(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } Mono<RxDocumentServiceResponse> readFeed(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> getStoreProxy(requestPopulated).processMessage(requestPopulated)); } private Mono<RxDocumentServiceResponse> query(RxDocumentServiceRequest request) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> this.getStoreProxy(requestPopulated).processMessage(requestPopulated) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } )); } @Override public Mono<ResourceResponse<DocumentCollection>> readCollection(String collectionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = 
this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readCollectionInternal(collectionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<DocumentCollection>> readCollectionInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Reading a Collection. collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.DocumentCollection, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DocumentCollection, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, DocumentCollection.class)); } catch (Exception e) { logger.debug("Failure in reading a collection, due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<DocumentCollection>> readCollections(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.DocumentCollection, DocumentCollection.class, Utils.joinPath(databaseLink, Paths.COLLECTIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, String query, QueryFeedOperationState state) { return createQuery(databaseLink, new SqlQuerySpec(query), state, DocumentCollection.class, ResourceType.DocumentCollection); } @Override public Flux<FeedResponse<DocumentCollection>> queryCollections(String databaseLink, SqlQuerySpec querySpec, 
QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, DocumentCollection.class, ResourceType.DocumentCollection); } private static String serializeProcedureParams(List<Object> objectArray) { String[] stringArray = new String[objectArray.size()]; for (int i = 0; i < objectArray.size(); ++i) { Object object = objectArray.get(i); if (object instanceof JsonSerializable) { stringArray[i] = ((JsonSerializable) object).toJson(); } else { try { stringArray[i] = mapper.writeValueAsString(object); } catch (IOException e) { throw new IllegalArgumentException("Can't serialize the object into the json string", e); } } } return String.format("[%s]", StringUtils.join(stringArray, ",")); } private static void validateResource(Resource resource) { if (!StringUtils.isEmpty(resource.getId())) { if (resource.getId().indexOf('/') != -1 || resource.getId().indexOf('\\') != -1 || resource.getId().indexOf('?') != -1 || resource.getId().indexOf(' throw new IllegalArgumentException("Id contains illegal chars."); } if (resource.getId().endsWith(" ")) { throw new IllegalArgumentException("Id ends with a space."); } } } private Map<String, String> getRequestHeaders(RequestOptions options, ResourceType resourceType, OperationType operationType) { Map<String, String> headers = new HashMap<>(); if (this.useMultipleWriteLocations) { headers.put(HttpConstants.HttpHeaders.ALLOW_TENTATIVE_WRITES, Boolean.TRUE.toString()); } if (consistencyLevel != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, consistencyLevel.toString()); } if (options == null) { if (!this.contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } return headers; } Map<String, String> customOptions = options.getHeaders(); if (customOptions != null) { headers.putAll(customOptions); } boolean contentResponseOnWriteEnabled = 
this.contentResponseOnWriteEnabled; if (options.isContentResponseOnWriteEnabled() != null) { contentResponseOnWriteEnabled = options.isContentResponseOnWriteEnabled(); } if (!contentResponseOnWriteEnabled && resourceType.equals(ResourceType.Document) && operationType.isWriteOperation()) { headers.put(HttpConstants.HttpHeaders.PREFER, HttpConstants.HeaderValues.PREFER_RETURN_MINIMAL); } if (options.getIfMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_MATCH, options.getIfMatchETag()); } if (options.getIfNoneMatchETag() != null) { headers.put(HttpConstants.HttpHeaders.IF_NONE_MATCH, options.getIfNoneMatchETag()); } if (options.getConsistencyLevel() != null) { headers.put(HttpConstants.HttpHeaders.CONSISTENCY_LEVEL, options.getConsistencyLevel().toString()); } if (options.getIndexingDirective() != null) { headers.put(HttpConstants.HttpHeaders.INDEXING_DIRECTIVE, options.getIndexingDirective().toString()); } if (options.getPostTriggerInclude() != null && options.getPostTriggerInclude().size() > 0) { String postTriggerInclude = StringUtils.join(options.getPostTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.POST_TRIGGER_INCLUDE, postTriggerInclude); } if (options.getPreTriggerInclude() != null && options.getPreTriggerInclude().size() > 0) { String preTriggerInclude = StringUtils.join(options.getPreTriggerInclude(), ","); headers.put(HttpConstants.HttpHeaders.PRE_TRIGGER_INCLUDE, preTriggerInclude); } if (!Strings.isNullOrEmpty(options.getSessionToken())) { headers.put(HttpConstants.HttpHeaders.SESSION_TOKEN, options.getSessionToken()); } if (options.getResourceTokenExpirySeconds() != null) { headers.put(HttpConstants.HttpHeaders.RESOURCE_TOKEN_EXPIRY, String.valueOf(options.getResourceTokenExpirySeconds())); } if (options.getOfferThroughput() != null && options.getOfferThroughput() >= 0) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, options.getOfferThroughput().toString()); } else if (options.getOfferType() != null) { 
headers.put(HttpConstants.HttpHeaders.OFFER_TYPE, options.getOfferType()); } if (options.getOfferThroughput() == null) { if (options.getThroughputProperties() != null) { Offer offer = ModelBridgeInternal.getOfferFromThroughputProperties(options.getThroughputProperties()); final OfferAutoscaleSettings offerAutoscaleSettings = offer.getOfferAutoScaleSettings(); OfferAutoscaleAutoUpgradeProperties autoscaleAutoUpgradeProperties = null; if (offerAutoscaleSettings != null) { autoscaleAutoUpgradeProperties = offer.getOfferAutoScaleSettings().getAutoscaleAutoUpgradeProperties(); } if (offer.hasOfferThroughput() && (offerAutoscaleSettings != null && offerAutoscaleSettings.getMaxThroughput() >= 0 || autoscaleAutoUpgradeProperties != null && autoscaleAutoUpgradeProperties .getAutoscaleThroughputProperties() .getIncrementPercent() >= 0)) { throw new IllegalArgumentException("Autoscale provisioned throughput can not be configured with " + "fixed offer"); } if (offer.hasOfferThroughput()) { headers.put(HttpConstants.HttpHeaders.OFFER_THROUGHPUT, String.valueOf(offer.getThroughput())); } else if (offer.getOfferAutoScaleSettings() != null) { headers.put(HttpConstants.HttpHeaders.OFFER_AUTOPILOT_SETTINGS, offer.getOfferAutoScaleSettings().toJson()); } } } if (options.isQuotaInfoEnabled()) { headers.put(HttpConstants.HttpHeaders.POPULATE_QUOTA_INFO, String.valueOf(true)); } if (options.isScriptLoggingEnabled()) { headers.put(HttpConstants.HttpHeaders.SCRIPT_ENABLE_LOGGING, String.valueOf(true)); } if (options.getDedicatedGatewayRequestOptions() != null) { if (options.getDedicatedGatewayRequestOptions().getMaxIntegratedCacheStaleness() != null) { headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_CACHE_STALENESS, String.valueOf(Utils.getMaxIntegratedCacheStalenessInMillis(options.getDedicatedGatewayRequestOptions()))); } if (options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed()) { 
headers.put(HttpConstants.HttpHeaders.DEDICATED_GATEWAY_PER_REQUEST_BYPASS_CACHE, String.valueOf(options.getDedicatedGatewayRequestOptions().isIntegratedCacheBypassed())); } } return headers; } public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return this.resetSessionTokenRetryPolicy; } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Document document, RequestOptions options) { Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); return collectionObs .map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private Mono<RxDocumentServiceRequest> addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object document, RequestOptions options, Mono<Utils.ValueHolder<DocumentCollection>> collectionObs) { return collectionObs.map(collectionValueHolder -> { addPartitionKeyInformation(request, contentAsByteBuffer, document, options, collectionValueHolder.v); return request; }); } private void addPartitionKeyInformation(RxDocumentServiceRequest request, ByteBuffer contentAsByteBuffer, Object objectDoc, RequestOptions options, DocumentCollection collection) { PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey(); PartitionKeyInternal partitionKeyInternal = null; if (options != null && options.getPartitionKey() != null && options.getPartitionKey().equals(PartitionKey.NONE)){ partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition); } else if (options != null && options.getPartitionKey() != null) { partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(options.getPartitionKey()); } else if (partitionKeyDefinition == null || 
partitionKeyDefinition.getPaths().size() == 0) { partitionKeyInternal = PartitionKeyInternal.getEmpty(); } else if (contentAsByteBuffer != null || objectDoc != null) { InternalObjectNode internalObjectNode; if (objectDoc instanceof InternalObjectNode) { internalObjectNode = (InternalObjectNode) objectDoc; } else if (objectDoc instanceof ObjectNode) { internalObjectNode = new InternalObjectNode((ObjectNode)objectDoc); } else if (contentAsByteBuffer != null) { contentAsByteBuffer.rewind(); internalObjectNode = new InternalObjectNode(contentAsByteBuffer); } else { throw new IllegalStateException("ContentAsByteBuffer and objectDoc are null"); } Instant serializationStartTime = Instant.now(); partitionKeyInternal = PartitionKeyHelper.extractPartitionKeyValueFromDocument(internalObjectNode, partitionKeyDefinition); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.PARTITION_KEY_FETCH_SERIALIZATION ); SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } } else { throw new UnsupportedOperationException("PartitionKey value must be supplied for this operation."); } request.setPartitionKeyInternal(partitionKeyInternal); request.setPartitionKeyDefinition(partitionKeyDefinition); request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson())); } private Mono<RxDocumentServiceRequest> getCreateDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy, String documentCollectionLink, Object document, RequestOptions options, boolean 
disableAutomaticIdGeneration, OperationType operationType, DiagnosticsClientContext clientContextOverride) {
        if (StringUtils.isEmpty(documentCollectionLink)) {
            throw new IllegalArgumentException("documentCollectionLink");
        }
        if (document == null) {
            throw new IllegalArgumentException("document");
        }
        // Serialize the payload, timing it for serialization diagnostics.
        Instant serializationStartTimeUTC = Instant.now();
        String trackingId = null;
        if (options != null) {
            trackingId = options.getTrackingId();
        }
        // NOTE(review): options is dereferenced unconditionally here although it was null-checked just
        // above for trackingId — presumably callers always pass non-null options; confirm upstream.
        ByteBuffer content = InternalObjectNode.serializeJsonToByteBuffer(document, options.getEffectiveItemSerializer(), trackingId);
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, operationType);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            getEffectiveClientContext(clientContextOverride),
            operationType, ResourceType.Document, path, requestHeaders, options, content);
        if (operationType.isWriteOperation() && options != null && options.getNonIdempotentWriteRetriesEnabled()) {
            request.setNonIdempotentWriteRetriesEnabled(true);
        }
        if( options != null) {
            // Hook so the end-to-end timeout machinery can flag this request as cancelled-on-timeout.
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return addPartitionKeyInformation(request, content, document, options, collectionObs);
    }

    // Builds the service request for a transactional batch; the payload is the pre-serialized
    // batch request body. Resolves the collection to stamp batch routing headers.
    private Mono<RxDocumentServiceRequest> getBatchDocumentRequest(DocumentClientRetryPolicy requestRetryPolicy,
                                                                   String documentCollectionLink,
                                                                   ServerBatchRequest serverBatchRequest,
                                                                   RequestOptions options,
                                                                   boolean disableAutomaticIdGeneration) {
        checkArgument(StringUtils.isNotEmpty(documentCollectionLink), "expected non empty documentCollectionLink");
        checkNotNull(serverBatchRequest, "expected non null serverBatchRequest");
        Instant serializationStartTimeUTC = Instant.now();
        ByteBuffer content = ByteBuffer.wrap(Utils.getUTF8Bytes(serverBatchRequest.getRequestBody()));
        Instant serializationEndTimeUTC = Instant.now();
        SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics =
            new SerializationDiagnosticsContext.SerializationDiagnostics(
                serializationStartTimeUTC,
                serializationEndTimeUTC,
                SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION);
        String path = Utils.joinPath(documentCollectionLink, Paths.DOCUMENTS_PATH_SEGMENT);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Batch);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(
            this,
            OperationType.Batch,
            ResourceType.Document,
            path,
            requestHeaders,
            options,
            content);
        if (options != null) {
            options.getMarkE2ETimeoutInRequestContextCallbackHook().set(
                () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true)));
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        if (requestRetryPolicy != null) {
            requestRetryPolicy.onBeforeSendRequest(request);
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics);
        if (serializationDiagnosticsContext != null) {
            serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics);
        }
        // NOTE(review): exclude-regions is set twice when options != null (also above) — harmless but redundant.
        if (options != null) {
            request.requestContext.setExcludeRegions(options.getExcludeRegions());
        }
        Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request);
        return collectionObs.map((Utils.ValueHolder<DocumentCollection> collectionValueHolder) -> {
            addBatchHeaders(request, serverBatchRequest, collectionValueHolder.v);
            return request;
        });
    }

    // Stamps partition-key (single-PK batch) or partition-key-range (range batch) routing info,
    // plus the batch control headers (is-batch, atomic, continue-on-error) and item count.
    private RxDocumentServiceRequest addBatchHeaders(RxDocumentServiceRequest request,
                                                     ServerBatchRequest serverBatchRequest,
                                                     DocumentCollection collection) {
        if(serverBatchRequest instanceof SinglePartitionKeyServerBatchRequest) {
            PartitionKey partitionKey = ((SinglePartitionKeyServerBatchRequest) serverBatchRequest).getPartitionKeyValue();
            PartitionKeyInternal partitionKeyInternal;
            if (partitionKey.equals(PartitionKey.NONE)) {
                PartitionKeyDefinition partitionKeyDefinition = collection.getPartitionKey();
                partitionKeyInternal = ModelBridgeInternal.getNonePartitionKey(partitionKeyDefinition);
            } else {
                partitionKeyInternal = BridgeInternal.getPartitionKeyInternal(partitionKey);
            }
            request.setPartitionKeyInternal(partitionKeyInternal);
            request.getHeaders().put(HttpConstants.HttpHeaders.PARTITION_KEY, Utils.escapeNonAscii(partitionKeyInternal.toJson()));
        } else if(serverBatchRequest instanceof PartitionKeyRangeServerBatchRequest) {
            request.setPartitionKeyRangeIdentity(new PartitionKeyRangeIdentity(((PartitionKeyRangeServerBatchRequest) serverBatchRequest).getPartitionKeyRangeId()));
        } else {
            throw new UnsupportedOperationException("Unknown Server request.");
        }
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_REQUEST, Boolean.TRUE.toString());
        request.getHeaders().put(HttpConstants.HttpHeaders.IS_BATCH_ATOMIC, String.valueOf(serverBatchRequest.isAtomicBatch()));
        request.getHeaders().put(HttpConstants.HttpHeaders.SHOULD_BATCH_CONTINUE_ON_ERROR, String.valueOf(serverBatchRequest.isShouldContinueOnError()));
        request.setPartitionKeyDefinition(collection.getPartitionKey());
        request.setNumberOfItemsInBatchRequest(serverBatchRequest.getOperations().size());
        return request;
    }

    /**
     * NOTE: Caller needs to consume it by subscribing to this Mono in order for the request to populate headers
     * @param request request to populate headers to
     * @param httpMethod http method
     * @return Mono, which on subscription will populate the headers in the request passed in the argument.
     */
    private Mono<RxDocumentServiceRequest> populateHeadersAsync(RxDocumentServiceRequest request, RequestVerb httpMethod) {
        request.getHeaders().put(HttpConstants.HttpHeaders.X_DATE, Utils.nowAsRFC1123());
        // Attach an authorization header when any key/token/credential source is configured.
        if (this.masterKeyOrResourceToken != null || this.resourceTokensMap != null
            || this.cosmosAuthorizationTokenResolver != null || this.credential != null) {
            String resourceName = request.getResourceAddress();
            String authorization = this.getUserAuthorizationToken(
                resourceName, request.getResourceType(), httpMethod, request.getHeaders(),
                AuthorizationTokenType.PrimaryMasterKey, request.properties);
            try {
                authorization = URLEncoder.encode(authorization, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new IllegalStateException("Failed to encode authtoken.", e);
            }
            request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
        }
        if (this.apiType != null) {
            request.getHeaders().put(HttpConstants.HttpHeaders.API_TYPE, this.apiType.toString());
        }
        this.populateCapabilitiesHeader(request);
        // Default content-type / accept headers per verb, unless the caller already set them.
        if ((RequestVerb.POST.equals(httpMethod) || RequestVerb.PUT.equals(httpMethod))
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON);
        }
        if (RequestVerb.PATCH.equals(httpMethod)
            && !request.getHeaders().containsKey(HttpConstants.HttpHeaders.CONTENT_TYPE)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.CONTENT_TYPE, RuntimeConstants.MediaTypes.JSON_PATCH);
        }
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.ACCEPT)) {
            request.getHeaders().put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON);
        }
        MetadataDiagnosticsContext metadataDiagnosticsCtx = BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics);
        // Feed-range-scoped requests need their range-filtering headers populated before authorization.
        if (this.requiresFeedRangeFiltering(request)) {
            return request.getFeedRange()
                .populateFeedRangeFilteringHeaders(
                    this.getPartitionKeyRangeCache(),
                    request,
                    this.collectionCache
                        .resolveCollectionAsync(metadataDiagnosticsCtx, request)
                        .flatMap(documentCollectionValueHolder -> {
                            if (documentCollectionValueHolder.v != null) {
                                request.setPartitionKeyDefinition(documentCollectionValueHolder.v.getPartitionKey());
                            }
                            return Mono.just(documentCollectionValueHolder);
                        })
                )
                .flatMap(this::populateAuthorizationHeader);
        }
        return this.populateAuthorizationHeader(request);
    }

    // Advertises the SDK's supported server capabilities unless the header is already present.
    private void populateCapabilitiesHeader(RxDocumentServiceRequest request) {
        if (!request.getHeaders().containsKey(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES)) {
            request
                .getHeaders()
                .put(HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES);
        }
    }

    // Feed-range filtering applies only to document/conflict feed-read and query operations
    // that actually carry a feed range.
    private boolean requiresFeedRangeFiltering(RxDocumentServiceRequest request) {
        if (request.getResourceType() != ResourceType.Document &&
            request.getResourceType() != ResourceType.Conflict) {
            return false;
        }
        switch (request.getOperationType()) {
            case ReadFeed:
            case Query:
            case SqlQuery:
                return request.getFeedRange() != null;
            default:
                return false;
        }
    }

    /**
     * Populates the AUTHORIZATION header on the request when AAD token auth is configured;
     * otherwise passes the request through unchanged.
     */
    @Override
    public Mono<RxDocumentServiceRequest> populateAuthorizationHeader(RxDocumentServiceRequest request) {
        if (request == null) {
            throw new IllegalArgumentException("request");
        }
        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    request.getHeaders().put(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return request;
                });
        } else {
            return Mono.just(request);
        }
    }

    /**
     * Same as above, but operating directly on an {@link HttpHeaders} instance.
     */
    @Override
    public Mono<HttpHeaders> populateAuthorizationHeader(HttpHeaders httpHeaders) {
        if (httpHeaders == null) {
            throw new IllegalArgumentException("httpHeaders");
        }
        if (this.authorizationTokenType == AuthorizationTokenType.AadToken) {
            return AadTokenAuthorizationHelper.getAuthorizationToken(this.tokenCredentialCache)
                .map(authorization -> {
                    httpHeaders.set(HttpConstants.HttpHeaders.AUTHORIZATION, authorization);
                    return httpHeaders;
                });
        }
        return Mono.just(httpHeaders);
    }

    @Override
    public AuthorizationTokenType getAuthorizationTokenType() {
        return this.authorizationTokenType;
    }

    /**
     * Resolves the authorization token for a request, in precedence order: custom token resolver,
     * credential-based signature, plain resource/master token, then the per-resource token map.
     */
    @Override
    public String getUserAuthorizationToken(String resourceName,
                                            ResourceType resourceType,
                                            RequestVerb requestVerb,
                                            Map<String, String> headers,
                                            AuthorizationTokenType tokenType,
                                            Map<String, Object> properties) {
        if (this.cosmosAuthorizationTokenResolver != null) {
            return this.cosmosAuthorizationTokenResolver.getAuthorizationToken(requestVerb.toUpperCase(), resourceName, this.resolveCosmosResourceType(resourceType).toString(), properties != null ?
Collections.unmodifiableMap(properties) : null); } else if (credential != null) { return this.authorizationTokenProvider.generateKeyAuthorizationSignature(requestVerb, resourceName, resourceType, headers); } else if (masterKeyOrResourceToken != null && hasAuthKeyResourceToken && resourceTokensMap == null) { return masterKeyOrResourceToken; } else { assert resourceTokensMap != null; if(resourceType.equals(ResourceType.DatabaseAccount)) { return this.firstResourceTokenFromPermissionFeed; } return ResourceTokenAuthorizationHelper.getAuthorizationTokenUsingResourceTokens(resourceTokensMap, requestVerb, resourceName, headers); } } private CosmosResourceType resolveCosmosResourceType(ResourceType resourceType) { CosmosResourceType cosmosResourceType = ModelBridgeInternal.fromServiceSerializedFormat(resourceType.toString()); if (cosmosResourceType == null) { return CosmosResourceType.SYSTEM; } return cosmosResourceType; } void captureSessionToken(RxDocumentServiceRequest request, RxDocumentServiceResponse response) { this.sessionContainer.setSessionToken(request, response.getResponseHeaders()); } private Mono<RxDocumentServiceResponse> create(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) .flatMap(requestPopulated -> { RxStoreModel storeProxy = this.getStoreProxy(requestPopulated); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return storeProxy.processMessage(requestPopulated, operationContextAndListenerTuple); }); } private Mono<RxDocumentServiceResponse> upsert(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy, OperationContextAndListenerTuple operationContextAndListenerTuple) { return populateHeadersAsync(request, RequestVerb.POST) 
.flatMap(requestPopulated -> { Map<String, String> headers = requestPopulated.getHeaders(); assert (headers != null); headers.put(HttpConstants.HttpHeaders.IS_UPSERT, "true"); if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated, operationContextAndListenerTuple) .map(response -> { this.captureSessionToken(requestPopulated, response); return response; } ); }); } private Mono<RxDocumentServiceResponse> replace(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.PUT) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } private Mono<RxDocumentServiceResponse> patch(RxDocumentServiceRequest request, DocumentClientRetryPolicy documentClientRetryPolicy) { return populateHeadersAsync(request, RequestVerb.PATCH) .flatMap(requestPopulated -> { if (documentClientRetryPolicy.getRetryContext() != null && documentClientRetryPolicy.getRetryContext().getRetryCount() > 0) { documentClientRetryPolicy.getRetryContext().updateEndTime(); } return getStoreProxy(requestPopulated).processMessage(requestPopulated); }); } @Override public Mono<ResourceResponse<Document>> createDocument( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Create, (opt, e2ecfg, clientCtxOverride) -> createDocumentCore( collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride), options, options != null && 
options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> createDocumentCore( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions(); if (nonNullRequestOptions.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs(() -> createDocumentInternal( collectionLink, document, nonNullRequestOptions, disableAutomaticIdGeneration, finalRetryPolicyInstance, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> createDocumentInternal( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy requestRetryPolicy, DiagnosticsClientContext clientContextOverride) { try { logger.debug("Creating a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> requestObs = getCreateDocumentRequest(requestRetryPolicy, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Create, clientContextOverride); return requestObs .flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in creating a document due to [{}]", e.getMessage(), e); return Mono.error(e); } } private static <T> Mono<T> getPointOperationResponseMonoWithE2ETimeout( RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, Mono<T> rxDocumentServiceResponseMono, ScopedDiagnosticsFactory scopedDiagnosticsFactory) { requestOptions.setCosmosEndToEndLatencyPolicyConfig(endToEndPolicyConfig); if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) { Duration endToEndTimeout = endToEndPolicyConfig.getEndToEndOperationTimeout(); if (endToEndTimeout.isNegative()) { CosmosDiagnostics latestCosmosDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(); if (latestCosmosDiagnosticsSnapshot == null) { scopedDiagnosticsFactory.createDiagnostics(); } return Mono.error(getNegativeTimeoutException(scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(), endToEndTimeout)); } return rxDocumentServiceResponseMono .timeout(endToEndTimeout) .onErrorMap(throwable -> getCancellationExceptionForPointOperations( scopedDiagnosticsFactory, throwable, requestOptions.getMarkE2ETimeoutInRequestContextCallbackHook())); } return rxDocumentServiceResponseMono; } private static Throwable getCancellationExceptionForPointOperations( ScopedDiagnosticsFactory scopedDiagnosticsFactory, Throwable throwable, AtomicReference<Runnable> markE2ETimeoutInRequestContextCallbackHook) { Throwable unwrappedException = reactor.core.Exceptions.unwrap(throwable); if 
(unwrappedException instanceof TimeoutException) { CosmosException exception = new OperationCancelledException(); exception.setStackTrace(throwable.getStackTrace()); Runnable actualCallback = markE2ETimeoutInRequestContextCallbackHook.get(); if (actualCallback != null) { logger.trace("Calling actual Mark E2E timeout callback"); actualCallback.run(); } CosmosDiagnostics lastDiagnosticsSnapshot = scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics(); if (lastDiagnosticsSnapshot == null) { scopedDiagnosticsFactory.createDiagnostics(); } BridgeInternal.setCosmosDiagnostics(exception, scopedDiagnosticsFactory.getMostRecentlyCreatedDiagnostics()); return exception; } return throwable; } private static CosmosException getNegativeTimeoutException(CosmosDiagnostics cosmosDiagnostics, Duration negativeTimeout) { checkNotNull(negativeTimeout, "Argument 'negativeTimeout' must not be null"); checkArgument( negativeTimeout.isNegative(), "This exception should only be used for negative timeouts"); String message = String.format("Negative timeout '%s' provided.", negativeTimeout); CosmosException exception = new OperationCancelledException(message, null); BridgeInternal.setSubStatusCode(exception, HttpConstants.SubStatusCodes.NEGATIVE_TIMEOUT_PROVIDED); if (cosmosDiagnostics != null) { BridgeInternal.setCosmosDiagnostics(exception, cosmosDiagnostics); } return exception; } @Override public Mono<ResourceResponse<Document>> upsertDocument(String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Upsert, (opt, e2ecfg, clientCtxOverride) -> upsertDocumentCore( collectionLink, document, opt, disableAutomaticIdGeneration, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); } private Mono<ResourceResponse<Document>> upsertDocumentCore( String collectionLink, Object document, RequestOptions 
options, boolean disableAutomaticIdGeneration, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); if (nonNullRequestOptions.getPartitionKey() == null) { requestRetryPolicy = new PartitionKeyMismatchRetryPolicy(collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRetryPolicyInstance = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> upsertDocumentInternal( collectionLink, document, nonNullRequestOptions, disableAutomaticIdGeneration, finalRetryPolicyInstance, scopedDiagnosticsFactory), finalRetryPolicyInstance), scopedDiagnosticsFactory ); }
// Builds the Upsert request (shares getCreateDocumentRequest with create) and maps the response
// to ResourceResponse<Document>; synchronous failures become Mono.error.
private Mono<ResourceResponse<Document>> upsertDocumentInternal( String collectionLink, Object document, RequestOptions options, boolean disableAutomaticIdGeneration, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { try { logger.debug("Upserting a Document. 
collectionLink: [{}]", collectionLink); Mono<RxDocumentServiceRequest> reqObs = getCreateDocumentRequest( retryPolicyInstance, collectionLink, document, options, disableAutomaticIdGeneration, OperationType.Upsert, clientContextOverride); return reqObs .flatMap(request -> upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options))) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in upserting a document due to [{}]", e.getMessage(), e); return Mono.error(e); } }
// Public replace entry point (document link + payload); wraps the core replace with the
// availability strategy.
@Override public Mono<ResourceResponse<Document>> replaceDocument(String documentLink, Object document, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Replace, (opt, e2ecfg, clientCtxOverride) -> replaceDocumentCore( documentLink, document, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); }
// Core replace path: scoped diagnostics, session-token-reset retry, optional PK-mismatch retry
// (collection link derived from the document link), all under the E2E timeout policy.
private Mono<ResourceResponse<Document>> replaceDocumentCore( String documentLink, Object document, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); if (nonNullRequestOptions.getPartitionKey() == null) { String collectionLink = Utils.getCollectionName(documentLink); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, nonNullRequestOptions); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( documentLink, document, nonNullRequestOptions, finalRequestRetryPolicy, endToEndPolicyConfig, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); }
// Validates inputs, converts the payload to a typed Document via the effective item serializer,
// then delegates to the (String, Document) replace overload.
private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Object document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } if (document == null) { throw new IllegalArgumentException("document"); } Document typedDocument = Document.fromObject(document, options.getEffectiveItemSerializer()); return this.replaceDocumentInternal( documentLink, typedDocument, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) { logger.debug("Failure in replacing a document due to [{}]", e.getMessage()); return Mono.error(e); } }
// Public replace entry point taking a Document whose self link identifies the target.
@Override public Mono<ResourceResponse<Document>> replaceDocument(Document document, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Replace, (opt, e2ecfg, clientCtxOverride) -> 
replaceDocumentCore( document, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); }
// Core replace path for the Document overload. NOTE(review): unlike the String overload above,
// this variant does not wrap in the E2E timeout, uses document.getSelfLink() (a *document* link)
// as the collectionLink for PartitionKeyMismatchRetryPolicy, and may pass a null `options` into
// that policy -- confirm these asymmetries are intended.
private Mono<ResourceResponse<Document>> replaceDocumentCore( Document document, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(clientContextOverride); if (options == null || options.getPartitionKey() == null) { String collectionLink = document.getSelfLink(); requestRetryPolicy = new PartitionKeyMismatchRetryPolicy( collectionCache, requestRetryPolicy, collectionLink, options); } DocumentClientRetryPolicy finalRequestRetryPolicy = requestRetryPolicy; return ObservableHelper.inlineIfPossibleAsObs( () -> replaceDocumentInternal( document, options, finalRequestRetryPolicy, endToEndPolicyConfig, clientContextOverride), requestRetryPolicy); }
// Null-checks the document, then delegates to the (String, Document) overload using its self link.
private Mono<ResourceResponse<Document>> replaceDocumentInternal( Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { try { if (document == null) { throw new IllegalArgumentException("document"); } return this.replaceDocumentInternal( document.getSelfLink(), document, options, retryPolicyInstance, clientContextOverride); } catch (Exception e) {
// NOTE(review): log message says "replacing a database" but this replaces a document -- likely copy/paste.
logger.debug("Failure in replacing a database due to [{}]", e.getMessage()); return Mono.error(e); } }
// Builds and sends the actual Replace request: serializes the document (optionally injecting a
// tracking id), records serialization diagnostics, resolves the collection to attach partition-key
// information, then executes the replace.
private Mono<ResourceResponse<Document>> replaceDocumentInternal( String documentLink, Document document, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { if (document == null) { throw new IllegalArgumentException("document"); } logger.debug("Replacing a Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Replace); Instant serializationStartTimeUTC = Instant.now(); Consumer<Map<String, Object>> onAfterSerialization = null; if (options != null) { String trackingId = options.getTrackingId(); if (trackingId != null && !trackingId.isEmpty()) { onAfterSerialization = (node) -> node.put(Constants.Properties.TRACKING_ID, trackingId); } }
// NOTE(review): `options` is null-checked just above but dereferenced unconditionally here --
// confirm every caller passes non-null options (the Document-overload path may not).
ByteBuffer content = document.serializeJsonToByteBuffer(options.getEffectiveItemSerializer(), onAfterSerialization); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Replace, ResourceType.Document, path, requestHeaders, options, content); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = 
collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, content, document, options, collectionObs); return requestObs .flatMap(req -> replace(request, retryPolicyInstance) .map(resp -> toResourceResponse(resp, Document.class))); }
// Resolves the effective E2E latency policy from per-request options, falling back to the
// client-level default.
private CosmosEndToEndOperationLatencyPolicyConfig getEndToEndOperationLatencyPolicyConfig( RequestOptions options, ResourceType resourceType, OperationType operationType) { return this.getEffectiveEndToEndOperationLatencyPolicyConfig( options != null ? options.getCosmosEndToEndLatencyPolicyConfig() : null, resourceType, operationType); }
// Precedence: explicit per-request policy > client default; the default only applies to Document
// operations and can be disabled for non-point operations via configuration.
private CosmosEndToEndOperationLatencyPolicyConfig getEffectiveEndToEndOperationLatencyPolicyConfig( CosmosEndToEndOperationLatencyPolicyConfig policyConfig, ResourceType resourceType, OperationType operationType) { if (policyConfig != null) { return policyConfig; } if (resourceType != ResourceType.Document) { return null; } if (!operationType.isPointOperation() && Configs.isDefaultE2ETimeoutDisabledForNonPointOperations()) { return null; } return this.cosmosEndToEndOperationLatencyPolicyConfig; }
// Public patch entry point; wraps the core patch with the availability strategy.
@Override public Mono<ResourceResponse<Document>> patchDocument(String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Patch, (opt, e2ecfg, clientCtxOverride) -> patchDocumentCore( documentLink, cosmosPatchOperations, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); }
// Core patch path: scoped diagnostics + session-token-reset retry under the E2E timeout policy.
private Mono<ResourceResponse<Document>> patchDocumentCore( String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = 
options != null ? options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> patchDocumentInternal( documentLink, cosmosPatchOperations, nonNullRequestOptions, documentClientRetryPolicy, scopedDiagnosticsFactory), documentClientRetryPolicy), scopedDiagnosticsFactory ); } private Mono<ResourceResponse<Document>> patchDocumentInternal( String documentLink, CosmosPatchOperations cosmosPatchOperations, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { checkArgument(StringUtils.isNotEmpty(documentLink), "expected non empty documentLink"); checkNotNull(cosmosPatchOperations, "expected non null cosmosPatchOperations"); logger.debug("Running patch operations on Document. 
documentLink: [{}]", documentLink); final String path = Utils.joinPath(documentLink, null); final Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Document, OperationType.Patch); Instant serializationStartTimeUTC = Instant.now(); ByteBuffer content = ByteBuffer.wrap( PatchUtil.serializeCosmosPatchToByteArray(cosmosPatchOperations, options)); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics serializationDiagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTimeUTC, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_SERIALIZATION); final RxDocumentServiceRequest request = RxDocumentServiceRequest.create( clientContextOverride, OperationType.Patch, ResourceType.Document, path, requestHeaders, options, content); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(request.requestContext.cosmosDiagnostics); if (serializationDiagnosticsContext != null) { serializationDiagnosticsContext.addSerializationDiagnostics(serializationDiagnostics); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, null, options, collectionObs); return requestObs .flatMap(req -> patch(request, 
retryPolicyInstance)) .map(resp -> toResourceResponse(resp, Document.class)); }
// Public delete entry point (link only); delegates with a null InternalObjectNode.
@Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Delete, (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore( documentLink, null, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); }
// Public delete entry point carrying the item snapshot (used for partition-key extraction).
@Override public Mono<ResourceResponse<Document>> deleteDocument(String documentLink, InternalObjectNode internalObjectNode, RequestOptions options) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Delete, (opt, e2ecfg, clientCtxOverride) -> deleteDocumentCore( documentLink, internalObjectNode, opt, e2ecfg, clientCtxOverride), options, options != null && options.getNonIdempotentWriteRetriesEnabled() ); }
// Core delete path: scoped diagnostics + session-token-reset retry under the E2E timeout policy.
private Mono<ResourceResponse<Document>> deleteDocumentCore( String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> deleteDocumentInternal( documentLink, internalObjectNode, nonNullRequestOptions, requestRetryPolicy, scopedDiagnosticsFactory), requestRetryPolicy), scopedDiagnosticsFactory ); }
// Builds and sends the actual Delete request: validates the link, wires the E2E-timeout hook and
// region exclusions, resolves the collection for partition-key info, then executes.
private Mono<ResourceResponse<Document>> deleteDocumentInternal( String documentLink, InternalObjectNode internalObjectNode, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Deleting a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Delete, ResourceType.Document, path, requestHeaders, options); if (options != null && options.getNonIdempotentWriteRetriesEnabled()) { request.setNonIdempotentWriteRetriesEnabled(true); } if (options != null) { options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync( 
BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation( request, null, internalObjectNode, options, collectionObs); return requestObs .flatMap(req -> this.delete(req, retryPolicyInstance, getOperationContextAndListenerTuple(options))) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class)); } catch (Exception e) { logger.debug("Failure in deleting a document due to [{}]", e.getMessage()); return Mono.error(e); } }
// Deletes every item with the given partition key (server-side bulk delete on the PartitionKey
// resource type). No availability strategy / E2E timeout wrapping here.
@Override public Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKey(String collectionLink, PartitionKey partitionKey, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteAllDocumentsByPartitionKeyInternal(collectionLink, options, requestRetryPolicy), requestRetryPolicy); }
// Builds the Delete request against the PartitionKey resource and executes it.
private Mono<ResourceResponse<Document>> deleteAllDocumentsByPartitionKeyInternal(String collectionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } logger.debug("Deleting all items by Partition Key. 
collectionLink: [{}]", collectionLink); String path = Utils.joinPath(collectionLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.PartitionKey, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.PartitionKey, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this .deleteAllItemsByPartitionKey(req, retryPolicyInstance, getOperationContextAndListenerTuple(options)) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in deleting documents due to [{}]", e.getMessage()); return Mono.error(e); } }
// Public read entry point; uses this client as the diagnostics factory.
@Override public Mono<ResourceResponse<Document>> readDocument(String documentLink, RequestOptions options) { return readDocument(documentLink, options, this); }
// Read with an explicit diagnostics factory; reads never enable non-idempotent write retries.
private Mono<ResourceResponse<Document>> readDocument( String documentLink, RequestOptions options, DiagnosticsClientContext innerDiagnosticsFactory) { return wrapPointOperationWithAvailabilityStrategy( ResourceType.Document, OperationType.Read, (opt, e2ecfg, clientCtxOverride) -> readDocumentCore(documentLink, opt, e2ecfg, clientCtxOverride), options, false, innerDiagnosticsFactory ); }
// Core read path: scoped diagnostics + session-token-reset retry under the E2E timeout policy.
private Mono<ResourceResponse<Document>> readDocumentCore( String documentLink, RequestOptions options, CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, DiagnosticsClientContext clientContextOverride) { RequestOptions nonNullRequestOptions = options != null ? 
options : new RequestOptions(); ScopedDiagnosticsFactory scopedDiagnosticsFactory = new ScopedDiagnosticsFactory(clientContextOverride, false); DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(scopedDiagnosticsFactory); return getPointOperationResponseMonoWithE2ETimeout( nonNullRequestOptions, endToEndPolicyConfig, ObservableHelper.inlineIfPossibleAsObs( () -> readDocumentInternal( documentLink, nonNullRequestOptions, retryPolicyInstance, scopedDiagnosticsFactory), retryPolicyInstance), scopedDiagnosticsFactory ); }
// Builds and sends the actual Read request. `options` is dereferenced without a null check here;
// the only caller (readDocumentCore) always passes a non-null RequestOptions.
private Mono<ResourceResponse<Document>> readDocumentInternal( String documentLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance, DiagnosticsClientContext clientContextOverride) { try { if (StringUtils.isEmpty(documentLink)) { throw new IllegalArgumentException("documentLink"); } logger.debug("Reading a Document. documentLink: [{}]", documentLink); String path = Utils.joinPath(documentLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.Document, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( getEffectiveClientContext(clientContextOverride), OperationType.Read, ResourceType.Document, path, requestHeaders, options); options.getMarkE2ETimeoutInRequestContextCallbackHook().set( () -> request.requestContext.setIsRequestCancelledOnTimeout(new AtomicBoolean(true))); request.requestContext.setExcludeRegions(options.getExcludeRegions()); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = this.collectionCache.resolveCollectionAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), request); Mono<RxDocumentServiceRequest> requestObs = addPartitionKeyInformation(request, null, null, options, collectionObs); return requestObs.flatMap(req -> this.read(request, 
retryPolicyInstance) .map(serviceResponse -> toResourceResponse(serviceResponse, Document.class))); } catch (Exception e) { logger.debug("Failure in reading a document due to [{}]", e.getMessage()); return Mono.error(e); } }
// Reads all documents in a collection by issuing a SELECT * query.
@Override public <T> Flux<FeedResponse<T>> readDocuments( String collectionLink, QueryFeedOperationState state, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return queryDocuments(collectionLink, "SELECT * FROM r", state, classOfT); }
// readMany: groups the requested (id, partition key) pairs by partition-key range, issues point
// reads for ranges with a single item and batched queries for the rest, then merges the results
// into a single FeedResponse with aggregated diagnostics and request charge.
@Override public <T> Mono<FeedResponse<T>> readMany( List<CosmosItemIdentity> itemIdentityList, String collectionLink, QueryFeedOperationState state, Class<T> klass) { final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true); state.registerDiagnosticsFactory( () -> {}, (ctx) -> diagnosticsFactory.merge(ctx) ); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory, OperationType.Query, ResourceType.Document, collectionLink, null ); Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs .flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); Mono<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = partitionKeyRangeCache .tryLookupAsync(BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null); return valueHolderMono .flatMap(collectionRoutingMapValueHolder -> { Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap = new HashMap<>(); CollectionRoutingMap routingMap = 
collectionRoutingMapValueHolder.v; if (routingMap == null) { return Mono.error(new IllegalStateException("Failed to get routing map.")); }
// Bucket each requested identity into its owning partition-key range; for MULTI_HASH (hierarchical)
// partition keys, readMany requires the full key (all path components).
itemIdentityList .forEach(itemIdentity -> { if (pkDefinition.getKind().equals(PartitionKind.MULTI_HASH) && ModelBridgeInternal.getPartitionKeyInternal(itemIdentity.getPartitionKey()) .getComponents().size() != pkDefinition.getPaths().size()) { throw new IllegalArgumentException(RMResources.PartitionKeyMismatch); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal( itemIdentity.getPartitionKey()), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); if (partitionRangeItemKeyMap.get(range) == null) { List<CosmosItemIdentity> list = new ArrayList<>(); list.add(itemIdentity); partitionRangeItemKeyMap.put(range, list); } else { List<CosmosItemIdentity> pairs = partitionRangeItemKeyMap.get(range); pairs.add(itemIdentity); partitionRangeItemKeyMap.put(range, pairs); } });
// Ranges holding a single item are served by cheaper point reads; multi-item ranges by queries.
Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = getRangeQueryMap(partitionRangeItemKeyMap, collection.getPartitionKey()); Flux<FeedResponse<T>> pointReads = pointReadsForReadMany( diagnosticsFactory, partitionRangeItemKeyMap, resourceLink, state.getQueryOptions(), klass); Flux<FeedResponse<T>> queries = queryForReadMany( diagnosticsFactory, resourceLink, new SqlQuerySpec(DUMMY_SQL_QUERY), state.getQueryOptions(), klass, ResourceType.Document, collection, Collections.unmodifiableMap(rangeQueryMap));
// Merge all pages, aggregating results, query metrics, request charge and client-side statistics
// into a single synthetic FeedResponse with combined diagnostics.
return Flux.merge(pointReads, queries) .collectList() .map(feedList -> { List<T> finalList = new ArrayList<>(); HashMap<String, String> headers = new HashMap<>(); ConcurrentMap<String, QueryMetrics> aggregatedQueryMetrics = new ConcurrentHashMap<>(); Collection<ClientSideRequestStatistics> aggregateRequestStatistics = new DistinctClientSideRequestStatisticsCollection(); double requestCharge = 0; for (FeedResponse<T> page : 
feedList) { ConcurrentMap<String, QueryMetrics> pageQueryMetrics = ModelBridgeInternal.queryMetrics(page); if (pageQueryMetrics != null) { pageQueryMetrics.forEach( aggregatedQueryMetrics::putIfAbsent); } requestCharge += page.getRequestCharge(); finalList.addAll(page.getResults()); aggregateRequestStatistics.addAll(diagnosticsAccessor.getClientSideRequestStatistics(page.getCosmosDiagnostics())); } CosmosDiagnostics aggregatedDiagnostics = BridgeInternal.createCosmosDiagnostics(aggregatedQueryMetrics); diagnosticsAccessor.addClientSideDiagnosticsToFeed( aggregatedDiagnostics, aggregateRequestStatistics); state.mergeDiagnosticsContext(); CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); if (ctx != null) { ctxAccessor.recordOperation( ctx, 200, 0, finalList.size(), requestCharge, aggregatedDiagnostics, null ); diagnosticsAccessor .setDiagnosticsContext( aggregatedDiagnostics, ctx); } headers.put(HttpConstants.HttpHeaders.REQUEST_CHARGE, Double .toString(requestCharge)); FeedResponse<T> frp = BridgeInternal .createFeedResponseWithQueryMetrics( finalList, headers, aggregatedQueryMetrics, null, false, false, aggregatedDiagnostics); return frp; }); })
// On failure, record the failed operation in the diagnostics context before re-throwing.
.onErrorMap(throwable -> { if (throwable instanceof CosmosException) { CosmosException cosmosException = (CosmosException)throwable; CosmosDiagnostics diagnostics = cosmosException.getDiagnostics(); if (diagnostics != null) { state.mergeDiagnosticsContext(); CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); if (ctx != null) { ctxAccessor.recordOperation( ctx, cosmosException.getStatusCode(), cosmosException.getSubStatusCode(), 0, cosmosException.getRequestCharge(), diagnostics, throwable ); diagnosticsAccessor .setDiagnosticsContext( diagnostics, state.getDiagnosticsContextSnapshot()); } } return cosmosException; } return throwable; }); } ); }
// Builds the per-range SQL query specs for readMany; ranges with exactly one item are skipped
// here because they are served via point reads instead.
private Map<PartitionKeyRange, SqlQuerySpec> getRangeQueryMap( Map<PartitionKeyRange, List<CosmosItemIdentity>> partitionRangeItemKeyMap, 
PartitionKeyDefinition partitionKeyDefinition) { Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap = new HashMap<>(); List<String> partitionKeySelectors = createPkSelectors(partitionKeyDefinition); for(Map.Entry<PartitionKeyRange, List<CosmosItemIdentity>> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; List<CosmosItemIdentity> cosmosItemIdentityList = entry.getValue(); if (cosmosItemIdentityList.size() > 1) { if (partitionKeySelectors.size() == 1 && partitionKeySelectors.get(0).equals("[\"id\"]")) { sqlQuerySpec = createReadManyQuerySpecPartitionKeyIdSame(cosmosItemIdentityList); } else { sqlQuerySpec = createReadManyQuerySpec(entry.getValue(), partitionKeySelectors); } rangeQueryMap.put(entry.getKey(), sqlQuerySpec); } } return rangeQueryMap; } private SqlQuerySpec createReadManyQuerySpecPartitionKeyIdSame(List<CosmosItemIdentity> idPartitionKeyPairList) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE c.id IN ( "); for (int i = 0; i < idPartitionKeyPairList.size(); i++) { CosmosItemIdentity itemIdentity = idPartitionKeyPairList.get(i); String idValue = itemIdentity.getId(); String idParamName = "@param" + i; parameters.add(new SqlParameter(idParamName, idValue)); queryStringBuilder.append(idParamName); if (i < idPartitionKeyPairList.size() - 1) { queryStringBuilder.append(", "); } } queryStringBuilder.append(" )"); return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } private SqlQuerySpec createReadManyQuerySpec( List<CosmosItemIdentity> itemIdentities, List<String> partitionKeySelectors) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE ( "); int paramCount = 0; for (int i = 0; i < itemIdentities.size(); i++) { CosmosItemIdentity itemIdentity = itemIdentities.get(i); PartitionKey pkValueAsPartitionKey = 
itemIdentity.getPartitionKey();
            // Expand the (possibly hierarchical) partition key into its component values;
            // component k is matched against partitionKeySelectors.get(k).
            Object[] pkValues = ModelBridgeInternal.getPartitionKeyInternal(pkValueAsPartitionKey).toObjectArray();
            List<List<String>> partitionKeyParams = new ArrayList<>();
            int pathCount = 0;
            for (Object pkComponentValue : pkValues) {
                String pkParamName = "@param" + paramCount;
                // Pair of [property selector (e.g. ["pk"]), parameter name]; rendered below
                // as:  c["pk"] = @paramN
                partitionKeyParams.add(Arrays.asList(partitionKeySelectors.get(pathCount), pkParamName));
                parameters.add(new SqlParameter(pkParamName, pkComponentValue));
                paramCount++;
                pathCount++;
            }

            String idValue = itemIdentity.getId();
            String idParamName = "@param" + paramCount;
            paramCount++;
            parameters.add(new SqlParameter(idParamName, idValue));

            queryStringBuilder.append("(");
            queryStringBuilder.append("c.id = ");
            queryStringBuilder.append(idParamName);
            for (List<String> pkParam: partitionKeyParams) {
                queryStringBuilder.append(" AND ");
                queryStringBuilder.append(" c");
                queryStringBuilder.append(pkParam.get(0));
                queryStringBuilder.append((" = "));
                queryStringBuilder.append(pkParam.get(1));
            }
            queryStringBuilder.append(" )");
            if (i < itemIdentities.size() - 1) {
                queryStringBuilder.append(" OR ");
            }
        }
        queryStringBuilder.append(" )");

        return new SqlQuerySpec(queryStringBuilder.toString(), parameters);
    }

    // Converts each partition key path (e.g. "/pk") into the bracketed property selector
    // (e.g. ["pk"]) used to address that property in SQL as  c["pk"].
    private List<String> createPkSelectors(PartitionKeyDefinition partitionKeyDefinition) {
        return partitionKeyDefinition.getPaths()
            .stream()
            // drop the leading '/'
            .map(pathPart -> StringUtils.substring(pathPart, 1))
            // FIX: escape an embedded double quote as \" inside the ["..."] selector.
            // Previously the quote was replaced by a lone backslash ("\\"), which
            // produced a malformed selector for any path containing a quote.
            .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\\""))
            .map(part -> "[\"" + part + "\"]")
            .collect(Collectors.toList());
    }

    /**
     * Executes the per-partition queries produced for a readMany call and returns the
     * merged stream of feed responses. Returns an empty Flux when every requested item
     * was routed to the point-read path (rangeQueryMap empty).
     */
    private <T> Flux<FeedResponse<T>> queryForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        String parentResourceLink,
        SqlQuerySpec sqlQuery,
        CosmosQueryRequestOptions options,
        Class<T> klass,
        ResourceType resourceTypeEnum,
        DocumentCollection collection,
        Map<PartitionKeyRange, SqlQuerySpec> rangeQueryMap) {

        if (rangeQueryMap.isEmpty()) {
            return Flux.empty();
        }

        UUID activityId = randomUuid();

        final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false);
IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(options));

        // Build one execution context per targeted partition range and merge their results.
        Flux<? extends IDocumentQueryExecutionContext<T>> executionContext =
            DocumentQueryExecutionContextFactory.createReadManyQueryAsync(
                diagnosticsFactory,
                queryClient,
                collection.getResourceId(),
                sqlQuery,
                rangeQueryMap,
                options,
                collection.getResourceId(),
                parentResourceLink,
                activityId,
                klass,
                resourceTypeEnum,
                isQueryCancelledOnTimeout);

        Flux<FeedResponse<T>> feedResponseFlux =
            executionContext.flatMap(IDocumentQueryExecutionContext<T>::executeAsync);

        RequestOptions requestOptions = options == null? null : ImplementationBridgeHelpers
            .CosmosQueryRequestOptionsHelper
            .getCosmosQueryRequestOptionsAccessor()
            .toRequestOptions(options);

        // When an end-to-end latency policy is configured and enabled, wrap the stream so
        // it is cancelled (and flagged via isQueryCancelledOnTimeout) once the budget expires.
        CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig =
            getEndToEndOperationLatencyPolicyConfig(requestOptions, ResourceType.Document, OperationType.Query);

        if (endToEndPolicyConfig != null && endToEndPolicyConfig.isEnabled()) {
            return getFeedResponseFluxWithTimeout(
                feedResponseFlux,
                endToEndPolicyConfig,
                options,
                isQueryCancelledOnTimeout,
                diagnosticsFactory);
        }

        return feedResponseFlux;
    }

    /**
     * Serves the readMany identities that landed alone on their partition range via point
     * reads (cheaper than a SQL query for a single item).
     */
    private <T> Flux<FeedResponse<T>> pointReadsForReadMany(
        ScopedDiagnosticsFactory diagnosticsFactory,
        Map<PartitionKeyRange, List<CosmosItemIdentity>> singleItemPartitionRequestMap,
        String resourceLink,
        CosmosQueryRequestOptions queryRequestOptions,
        Class<T> klass) {

        CosmosItemSerializer effectiveItemSerializer = getEffectiveItemSerializer(queryRequestOptions);

        return Flux.fromIterable(singleItemPartitionRequestMap.values())
            .flatMap(cosmosItemIdentityList -> {
                // Only single-identity lists are handled here; larger lists were already
                // routed to the query path by getRangeQueryMap.
                if (cosmosItemIdentityList.size() == 1) {
                    CosmosItemIdentity firstIdentity = cosmosItemIdentityList.get(0);
                    RequestOptions requestOptions = ImplementationBridgeHelpers
                        .CosmosQueryRequestOptionsHelper
                        .getCosmosQueryRequestOptionsAccessor()
                        .toRequestOptions(queryRequestOptions);
                    requestOptions.setPartitionKey(firstIdentity.getPartitionKey());
                    return
this.readDocument((resourceLink + firstIdentity.getId()), requestOptions, diagnosticsFactory)
                        // NOTE(review): assumes resourceLink already ends with the documents
                        // path separator — confirm against callers before changing.
                        .flatMap(resourceResponse -> Mono.just(
                            new ImmutablePair<ResourceResponse<Document>, CosmosException>(resourceResponse, null)
                        ))
                        .onErrorResume(throwable -> {
                            Throwable unwrappedThrowable = Exceptions.unwrap(throwable);
                            if (unwrappedThrowable instanceof CosmosException) {
                                CosmosException cosmosException = (CosmosException) unwrappedThrowable;
                                int statusCode = cosmosException.getStatusCode();
                                int subStatusCode = cosmosException.getSubStatusCode();
                                // A plain 404 (sub-status UNKNOWN) means "item does not exist";
                                // readMany treats that as an empty result, not a failure, so the
                                // exception is carried in the pair instead of propagated.
                                if (statusCode == HttpConstants.StatusCodes.NOTFOUND && subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) {
                                    return Mono.just(new ImmutablePair<ResourceResponse<Document>, CosmosException>(null, cosmosException));
                                }
                            }
                            // Everything else (throttles, 404 with a meaningful sub-status, ...)
                            // still fails the stream.
                            return Mono.error(unwrappedThrowable);
                        });
                }
                return Mono.empty();
            })
            .flatMap(resourceResponseToExceptionPair -> {
                ResourceResponse<Document> resourceResponse = resourceResponseToExceptionPair.getLeft();
                CosmosException cosmosException = resourceResponseToExceptionPair.getRight();
                FeedResponse<T> feedResponse;

                if (cosmosException != null) {
                    // Not-found: emit an empty page but keep the headers/diagnostics of the
                    // failed point read so charges and traces are still accounted for.
                    feedResponse = ModelBridgeInternal.createFeedResponse(new ArrayList<>(), cosmosException.getResponseHeaders());
                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosException.getDiagnostics())));
                } else {
                    // Wrap the single item as a one-element feed page.
                    CosmosItemResponse<T> cosmosItemResponse =
                        itemResponseAccessor.createCosmosItemResponse(resourceResponse, klass, effectiveItemSerializer);

                    feedResponse = ModelBridgeInternal.createFeedResponse(
                        Arrays.asList(cosmosItemResponse.getItem()),
                        cosmosItemResponse.getResponseHeaders());

                    diagnosticsAccessor.addClientSideDiagnosticsToFeed(
                        feedResponse.getCosmosDiagnostics(),
                        Collections.singleton(
                            BridgeInternal.getClientSideRequestStatics(cosmosItemResponse.getDiagnostics())));
                }

                return Mono.just(feedResponse);
            });
    }

    @Override
    public <T> Flux<FeedResponse<T>> queryDocuments(
String collectionLink, String query, QueryFeedOperationState state, Class<T> classOfT) {
        // Convenience overload: wrap the raw query text and delegate.
        return queryDocuments(collectionLink, new SqlQuerySpec(query), state, classOfT);
    }

    /**
     * Resolves the serializer to use for item payloads, in precedence order:
     * request-level serializer, then the client's default custom serializer,
     * then the SDK-wide default.
     */
    @Override
    public CosmosItemSerializer getEffectiveItemSerializer(CosmosItemSerializer requestOptionsItemSerializer) {

        if (requestOptionsItemSerializer != null) {
            return requestOptionsItemSerializer;
        }

        if (this.defaultCustomSerializer != null) {
            return this.defaultCustomSerializer;
        }

        return CosmosItemSerializer.DEFAULT_SERIALIZER;
    }

    // Overload for query request options; delegates to the precedence logic above.
    // NOTE(review): the type parameter <T> is unused here and on the overload below.
    private <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosQueryRequestOptions queryRequestOptions) {
        CosmosItemSerializer requestOptionsItemSerializer =
            queryRequestOptions != null ? queryRequestOptions.getCustomItemSerializer() : null;

        return this.getEffectiveItemSerializer(requestOptionsItemSerializer);
    }

    // Overload for item request options; delegates to the precedence logic above.
    private <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosItemRequestOptions itemRequestOptions) {
        CosmosItemSerializer requestOptionsItemSerializer =
            itemRequestOptions != null ?
itemRequestOptions.getCustomItemSerializer() : null; return this.getEffectiveItemSerializer(requestOptionsItemSerializer); } private IDocumentQueryClient documentQueryClientImpl(RxDocumentClientImpl rxDocumentClientImpl, OperationContextAndListenerTuple operationContextAndListenerTuple) { return new IDocumentQueryClient () { @Override public RxCollectionCache getCollectionCache() { return RxDocumentClientImpl.this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return RxDocumentClientImpl.this.partitionKeyRangeCache; } @Override public IRetryPolicyFactory getResetSessionTokenRetryPolicy() { return RxDocumentClientImpl.this.resetSessionTokenRetryPolicy; } @Override public ConsistencyLevel getDefaultConsistencyLevelAsync() { return RxDocumentClientImpl.this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } @Override public ConsistencyLevel getDesiredConsistencyLevelAsync() { return RxDocumentClientImpl.this.consistencyLevel; } @Override public Mono<RxDocumentServiceResponse> executeQueryAsync(RxDocumentServiceRequest request) { if (operationContextAndListenerTuple == null) { return RxDocumentClientImpl.this.query(request).single(); } else { final OperationListener listener = operationContextAndListenerTuple.getOperationListener(); final OperationContext operationContext = operationContextAndListenerTuple.getOperationContext(); request.getHeaders().put(HttpConstants.HttpHeaders.CORRELATED_ACTIVITY_ID, operationContext.getCorrelationActivityId()); listener.requestListener(operationContext, request); return RxDocumentClientImpl.this.query(request).single().doOnNext( response -> listener.responseListener(operationContext, response) ).doOnError( ex -> listener.exceptionListener(operationContext, ex) ); } } @Override public QueryCompatibilityMode getQueryCompatibilityMode() { return QueryCompatibilityMode.Default; } @Override public <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( ResourceType resourceType, 
OperationType operationType, Supplier<DocumentClientRetryPolicy> retryPolicyFactory, RxDocumentServiceRequest req, BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation) { return RxDocumentClientImpl.this.executeFeedOperationWithAvailabilityStrategy( resourceType, operationType, retryPolicyFactory, req, feedOperation ); } @Override public <T> CosmosItemSerializer getEffectiveItemSerializer(CosmosQueryRequestOptions queryRequestOptions) { return RxDocumentClientImpl.this.getEffectiveItemSerializer(queryRequestOptions); } @Override public Mono<RxDocumentServiceResponse> readFeedAsync(RxDocumentServiceRequest request) { return null; } }; } @Override public <T> Flux<FeedResponse<T>> queryDocuments( String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state, Class<T> classOfT) { SqlQuerySpecLogger.getInstance().logQuery(querySpec); return createQuery(collectionLink, querySpec, state, classOfT, ResourceType.Document); } @Override public <T> Flux<FeedResponse<T>> queryDocumentChangeFeed( final DocumentCollection collection, final CosmosChangeFeedRequestOptions changeFeedOptions, Class<T> classOfT) { checkNotNull(collection, "Argument 'collection' must not be null."); ChangeFeedQueryImpl<T> changeFeedQueryImpl = new ChangeFeedQueryImpl<>( this, ResourceType.Document, classOfT, collection.getAltLink(), collection.getResourceId(), changeFeedOptions); return changeFeedQueryImpl.executeAsync(); } @Override public <T> Flux<FeedResponse<T>> queryDocumentChangeFeedFromPagedFlux(DocumentCollection collection, ChangeFeedOperationState state, Class<T> classOfT) { return queryDocumentChangeFeed(collection, state.getChangeFeedOptions(), classOfT); } @Override public <T> Flux<FeedResponse<T>> readAllDocuments( String collectionLink, PartitionKey partitionKey, QueryFeedOperationState state, Class<T> classOfT) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (partitionKey 
== null) { throw new IllegalArgumentException("partitionKey"); } final CosmosQueryRequestOptions effectiveOptions = qryOptAccessor.clone(state.getQueryOptions()); RequestOptions nonNullRequestOptions = qryOptAccessor.toRequestOptions(effectiveOptions); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = nonNullRequestOptions.getCosmosEndToEndLatencyPolicyConfig(); List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation( endToEndPolicyConfig, ResourceType.Document, OperationType.Query, false, nonNullRequestOptions); ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, false); if (orderedApplicableRegionsForSpeculation.size() < 2) { state.registerDiagnosticsFactory( () -> {}, (ctx) -> diagnosticsFactory.merge(ctx)); } else { state.registerDiagnosticsFactory( () -> diagnosticsFactory.reset(), (ctx) -> diagnosticsFactory.merge(ctx)); } RxDocumentServiceRequest request = RxDocumentServiceRequest.create( diagnosticsFactory, OperationType.Query, ResourceType.Document, collectionLink, null ); Flux<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request).flux(); return collectionObs.flatMap(documentCollectionResourceResponse -> { DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); List<String> partitionKeySelectors = createPkSelectors(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, partitionKeySelectors); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); UUID activityId = randomUuid(); final AtomicBoolean isQueryCancelledOnTimeout = new AtomicBoolean(false); IDocumentQueryClient queryClient = documentQueryClientImpl(RxDocumentClientImpl.this, 
getOperationContextAndListenerTuple(state.getQueryOptions())); InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, resourceLink, ModelBridgeInternal.getPropertiesFromQueryRequestOptions(effectiveOptions)); Flux<FeedResponse<T>> innerFlux = ObservableHelper.fluxInlineIfPossibleAsObs( () -> { Flux<Utils.ValueHolder<CollectionRoutingMap>> valueHolderMono = this.partitionKeyRangeCache .tryLookupAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), null, null).flux(); return valueHolderMono.flatMap(collectionRoutingMapValueHolder -> { CollectionRoutingMap routingMap = collectionRoutingMapValueHolder.v; if (routingMap == null) { return Mono.error(new IllegalStateException("Failed to get routing map.")); } String effectivePartitionKeyString = PartitionKeyInternalHelper .getEffectivePartitionKeyString( BridgeInternal.getPartitionKeyInternal(partitionKey), pkDefinition); PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey(effectivePartitionKeyString); return createQueryInternal( diagnosticsFactory, resourceLink, querySpec, ModelBridgeInternal.setPartitionKeyRangeIdInternal(effectiveOptions, range.getId()), classOfT, ResourceType.Document, queryClient, activityId, isQueryCancelledOnTimeout); }); }, invalidPartitionExceptionRetryPolicy); if (orderedApplicableRegionsForSpeculation.size() < 2) { return innerFlux; } return innerFlux .flatMap(result -> { diagnosticsFactory.merge(nonNullRequestOptions); return Mono.just(result); }) .onErrorMap(throwable -> { diagnosticsFactory.merge(nonNullRequestOptions); return throwable; }) .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions)); }); } @Override public Map<String, PartitionedQueryExecutionInfo> getQueryPlanCache() { return queryPlanCache; } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(final String 
collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<PartitionKeyRange>> readPartitionKeyRanges(String collectionLink, CosmosQueryRequestOptions options) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(options, ResourceType.PartitionKeyRange, PartitionKeyRange.class, Utils.joinPath(collectionLink, Paths.PARTITION_KEY_RANGES_PATH_SEGMENT)); } private RxDocumentServiceRequest getStoredProcedureRequest(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } validateResource(storedProcedure); String path = Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); } private RxDocumentServiceRequest getUserDefinedFunctionRequest(String collectionLink, UserDefinedFunction udf, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (udf == null) { throw new IllegalArgumentException("udf"); } validateResource(udf); String path = Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, 
operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options); } @Override public Mono<ResourceResponse<StoredProcedure>> createStoredProcedure(String collectionLink, StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createStoredProcedureInternal(collectionLink, storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> createStoredProcedureInternal(String collectionLink, StoredProcedure storedProcedure, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a StoredProcedure. collectionLink: [{}], storedProcedure id [{}]", collectionLink, storedProcedure.getId()); RxDocumentServiceRequest request = getStoredProcedureRequest(collectionLink, storedProcedure, options, OperationType.Create); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in creating a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedure(StoredProcedure storedProcedure, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceStoredProcedureInternal(storedProcedure, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> replaceStoredProcedureInternal(StoredProcedure storedProcedure, RequestOptions options, 
DocumentClientRetryPolicy retryPolicyInstance) { try { if (storedProcedure == null) { throw new IllegalArgumentException("storedProcedure"); } logger.debug("Replacing a StoredProcedure. storedProcedure id [{}]", storedProcedure.getId()); RxDocumentClientImpl.validateResource(storedProcedure); String path = Utils.joinPath(storedProcedure.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.StoredProcedure, path, storedProcedure, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in replacing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy requestRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteStoredProcedureInternal(storedProcedureLink, options, requestRetryPolicy), requestRetryPolicy); } private Mono<ResourceResponse<StoredProcedure>> deleteStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Deleting a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in deleting a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<StoredProcedure>> readStoredProcedure(String storedProcedureLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readStoredProcedureInternal(storedProcedureLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<StoredProcedure>> readStoredProcedureInternal(String storedProcedureLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(storedProcedureLink)) { throw new IllegalArgumentException("storedProcedureLink"); } logger.debug("Reading a StoredProcedure. 
storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.StoredProcedure, path, requestHeaders, options); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, StoredProcedure.class)); } catch (Exception e) { logger.debug("Failure in reading a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<StoredProcedure>> readStoredProcedures(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.StoredProcedure, StoredProcedure.class, Utils.joinPath(collectionLink, Paths.STORED_PROCEDURES_PATH_SEGMENT)); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, String query, QueryFeedOperationState state) { return queryStoredProcedures(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<StoredProcedure>> queryStoredProcedures(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, StoredProcedure.class, ResourceType.StoredProcedure); } @Override public Mono<StoredProcedureResponse> executeStoredProcedure(String storedProcedureLink, RequestOptions options, List<Object> procedureParams) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> executeStoredProcedureInternal(storedProcedureLink, options, procedureParams, 
documentClientRetryPolicy), documentClientRetryPolicy); } @Override public Mono<CosmosBatchResponse> executeBatchRequest(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, boolean disableAutomaticIdGeneration) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> executeBatchRequestInternal(collectionLink, serverBatchRequest, options, documentClientRetryPolicy, disableAutomaticIdGeneration), documentClientRetryPolicy); } private Mono<StoredProcedureResponse> executeStoredProcedureInternal(String storedProcedureLink, RequestOptions options, List<Object> procedureParams, DocumentClientRetryPolicy retryPolicy) { try { logger.debug("Executing a StoredProcedure. storedProcedureLink [{}]", storedProcedureLink); String path = Utils.joinPath(storedProcedureLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.StoredProcedure, OperationType.ExecuteJavaScript); requestHeaders.put(HttpConstants.HttpHeaders.ACCEPT, RuntimeConstants.MediaTypes.JSON); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ExecuteJavaScript, ResourceType.StoredProcedure, path, procedureParams != null && !procedureParams.isEmpty() ? 
RxDocumentClientImpl.serializeProcedureParams(procedureParams) : "", requestHeaders, options); if (options != null) { request.requestContext.setExcludeRegions(options.getExcludeRegions()); } if (retryPolicy != null) { retryPolicy.onBeforeSendRequest(request); } Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> create(request, retryPolicy, getOperationContextAndListenerTuple(options)) .map(response -> { this.captureSessionToken(request, response); return toStoredProcedureResponse(response); })); } catch (Exception e) { logger.debug("Failure in executing a StoredProcedure due to [{}]", e.getMessage(), e); return Mono.error(e); } } private Mono<CosmosBatchResponse> executeBatchRequestInternal(String collectionLink, ServerBatchRequest serverBatchRequest, RequestOptions options, DocumentClientRetryPolicy requestRetryPolicy, boolean disableAutomaticIdGeneration) { try { logger.debug("Executing a Batch request with number of operations {}", serverBatchRequest.getOperations().size()); Mono<RxDocumentServiceRequest> requestObs = getBatchDocumentRequest(requestRetryPolicy, collectionLink, serverBatchRequest, options, disableAutomaticIdGeneration); Mono<RxDocumentServiceResponse> responseObservable = requestObs.flatMap(request -> create(request, requestRetryPolicy, getOperationContextAndListenerTuple(options))); return responseObservable .map(serviceResponse -> BatchResponseParser.fromDocumentServiceResponse(serviceResponse, serverBatchRequest, true)); } catch (Exception ex) { logger.debug("Failure in executing a batch due to [{}]", ex.getMessage(), ex); return Mono.error(ex); } } @Override public Mono<ResourceResponse<Trigger>> createTrigger(String collectionLink, Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createTriggerInternal(collectionLink, 
trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> createTriggerInternal(String collectionLink, Trigger trigger, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Creating a Trigger. collectionLink [{}], trigger id [{}]", collectionLink, trigger.getId()); RxDocumentServiceRequest request = getTriggerRequest(collectionLink, trigger, options, OperationType.Create); if (retryPolicyInstance != null){ retryPolicyInstance.onBeforeSendRequest(request); } return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class)); } catch (Exception e) { logger.debug("Failure in creating a Trigger due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getTriggerRequest(String collectionLink, Trigger trigger, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } if (trigger == null) { throw new IllegalArgumentException("trigger"); } RxDocumentClientImpl.validateResource(trigger); String path = Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Trigger, path, trigger, requestHeaders, options); } @Override public Mono<ResourceResponse<Trigger>> replaceTrigger(Trigger trigger, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceTriggerInternal(trigger, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Trigger>> replaceTriggerInternal(Trigger trigger, RequestOptions options, DocumentClientRetryPolicy 
retryPolicyInstance) {
    // Tail of replaceTriggerInternal (signature starts above this chunk): validate the
    // trigger, build a Replace request against its self-link, and execute it under the
    // supplied retry policy. Failures surface as Mono.error rather than thrown exceptions.
    try {
        if (trigger == null) {
            throw new IllegalArgumentException("trigger");
        }
        logger.debug("Replacing a Trigger. trigger id [{}]", trigger.getId());
        RxDocumentClientImpl.validateResource(trigger);

        String path = Utils.joinPath(trigger.getSelfLink(), null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.Trigger, path, trigger, requestHeaders, options);
        if (retryPolicyInstance != null) {
            // Let the retry policy capture per-request state before the wire call.
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes the trigger identified by {@code triggerLink}; retried via a session-token-reset policy. */
@Override
public Mono<ResourceResponse<Trigger>> deleteTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and executes the Delete request for a trigger; argument/validation errors are
// returned through the reactive pipeline as Mono.error.
private Mono<ResourceResponse<Trigger>> deleteTriggerInternal(String triggerLink, RequestOptions options,
                                                              DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Deleting a Trigger. triggerLink [{}]", triggerLink);

        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the trigger identified by {@code triggerLink}; retried via a session-token-reset policy. */
@Override
public Mono<ResourceResponse<Trigger>> readTrigger(String triggerLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readTriggerInternal(triggerLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and executes the Read request for a trigger.
private Mono<ResourceResponse<Trigger>> readTriggerInternal(String triggerLink, RequestOptions options,
                                                            DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(triggerLink)) {
            throw new IllegalArgumentException("triggerLink");
        }
        logger.debug("Reading a Trigger. triggerLink [{}]", triggerLink);

        String path = Utils.joinPath(triggerLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Trigger, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.Trigger, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Trigger.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a Trigger due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads all triggers of a collection as a paged feed. */
@Override
public Flux<FeedResponse<Trigger>> readTriggers(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return nonDocumentReadFeed(state, ResourceType.Trigger, Trigger.class,
        Utils.joinPath(collectionLink, Paths.TRIGGERS_PATH_SEGMENT));
}

/** Convenience overload: wraps the raw query string in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, String query, QueryFeedOperationState state) {
    return queryTriggers(collectionLink, new SqlQuerySpec(query), state);
}

/** Queries triggers of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<Trigger>> queryTriggers(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, Trigger.class, ResourceType.Trigger);
}

/** Creates a user-defined function under the given collection. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunction(String collectionLink, UserDefinedFunction udf,
                                                                             RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createUserDefinedFunctionInternal(collectionLink, udf, options, retryPolicyInstance), retryPolicyInstance);
}

// Signature continues on the next chunk line.
private Mono<ResourceResponse<UserDefinedFunction>> createUserDefinedFunctionInternal(String collectionLink, UserDefinedFunction
udf, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) {
    // Tail of createUserDefinedFunctionInternal: build the Create request via the shared
    // helper and execute it; failures surface as Mono.error.
    try {
        logger.debug("Creating a UserDefinedFunction. collectionLink [{}], udf id [{}]", collectionLink, udf.getId());
        RxDocumentServiceRequest request = getUserDefinedFunctionRequest(collectionLink, udf, options, OperationType.Create);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.create(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Replaces an existing user-defined function (addressed by its self-link). */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunction(UserDefinedFunction udf, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserDefinedFunctionInternal(udf, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and executes the Replace request for a UDF.
private Mono<ResourceResponse<UserDefinedFunction>> replaceUserDefinedFunctionInternal(UserDefinedFunction udf, RequestOptions options,
                                                                                       DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (udf == null) {
            throw new IllegalArgumentException("udf");
        }
        logger.debug("Replacing a UserDefinedFunction. udf id [{}]", udf.getId());
        validateResource(udf);

        String path = Utils.joinPath(udf.getSelfLink(), null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Replace);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Replace, ResourceType.UserDefinedFunction, path, udf, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in replacing a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Deletes the user-defined function identified by {@code udfLink}. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunction(String udfLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and executes the Delete request for a UDF.
private Mono<ResourceResponse<UserDefinedFunction>> deleteUserDefinedFunctionInternal(String udfLink, RequestOptions options,
                                                                                      DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Deleting a UserDefinedFunction. udfLink [{}]", udfLink);

        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Delete);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Delete, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in deleting a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads the user-defined function identified by {@code udfLink}. */
@Override
public Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunction(String udfLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readUserDefinedFunctionInternal(udfLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and executes the Read request for a UDF.
private Mono<ResourceResponse<UserDefinedFunction>> readUserDefinedFunctionInternal(String udfLink, RequestOptions options,
                                                                                    DocumentClientRetryPolicy retryPolicyInstance) {
    try {
        if (StringUtils.isEmpty(udfLink)) {
            throw new IllegalArgumentException("udfLink");
        }
        logger.debug("Reading a UserDefinedFunction. udfLink [{}]", udfLink);

        String path = Utils.joinPath(udfLink, null);
        Map<String, String> requestHeaders = this.getRequestHeaders(options, ResourceType.UserDefinedFunction, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.UserDefinedFunction, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, UserDefinedFunction.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a UserDefinedFunction due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Reads all user-defined functions of a collection as a paged feed. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> readUserDefinedFunctions(String collectionLink, QueryFeedOperationState state) {
    if (StringUtils.isEmpty(collectionLink)) {
        throw new IllegalArgumentException("collectionLink");
    }
    return nonDocumentReadFeed(state, ResourceType.UserDefinedFunction, UserDefinedFunction.class,
        Utils.joinPath(collectionLink, Paths.USER_DEFINED_FUNCTIONS_PATH_SEGMENT));
}

/** Convenience overload: wraps the raw query string in a {@link SqlQuerySpec}. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink, String query, QueryFeedOperationState state) {
    return queryUserDefinedFunctions(collectionLink, new SqlQuerySpec(query), state);
}

/** Queries user-defined functions of a collection with a parameterized query spec. */
@Override
public Flux<FeedResponse<UserDefinedFunction>> queryUserDefinedFunctions(
    String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) {
    return createQuery(collectionLink, querySpec, state, UserDefinedFunction.class, ResourceType.UserDefinedFunction);
}

/** Reads the conflict identified by {@code conflictLink}. */
@Override
public Mono<ResourceResponse<Conflict>> readConflict(String conflictLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> readConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance);
}

private
Mono<ResourceResponse<Conflict>> readConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Reading a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in reading a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Conflict>> readConflicts(String collectionLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } return nonDocumentReadFeed(state, ResourceType.Conflict, Conflict.class, Utils.joinPath(collectionLink, Paths.CONFLICTS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, String query, QueryFeedOperationState state) { return queryConflicts(collectionLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Conflict>> queryConflicts(String collectionLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(collectionLink, querySpec, state, Conflict.class, ResourceType.Conflict); } @Override public Mono<ResourceResponse<Conflict>> deleteConflict(String conflictLink, RequestOptions options) { 
DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteConflictInternal(conflictLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Conflict>> deleteConflictInternal(String conflictLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(conflictLink)) { throw new IllegalArgumentException("conflictLink"); } logger.debug("Deleting a Conflict. conflictLink [{}]", conflictLink); String path = Utils.joinPath(conflictLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Conflict, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Conflict, path, requestHeaders, options); Mono<RxDocumentServiceRequest> reqObs = addPartitionKeyInformation(request, null, null, options); return reqObs.flatMap(req -> { if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Conflict.class)); }); } catch (Exception e) { logger.debug("Failure in deleting a Conflict due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> createUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> createUserInternal(databaseLink, user, options, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<User>> createUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in creating a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> upsertUser(String databaseLink, User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertUserInternal(databaseLink, user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> upsertUserInternal(String databaseLink, User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a User. 
databaseLink [{}], user id [{}]", databaseLink, user.getId()); RxDocumentServiceRequest request = getUserRequest(databaseLink, user, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in upserting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getUserRequest(String databaseLink, User user, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } if (user == null) { throw new IllegalArgumentException("user"); } RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.User, path, user, requestHeaders, options); } @Override public Mono<ResourceResponse<User>> replaceUser(User user, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceUserInternal(user, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> replaceUserInternal(User user, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (user == null) { throw new IllegalArgumentException("user"); } logger.debug("Replacing a User. 
user id [{}]", user.getId()); RxDocumentClientImpl.validateResource(user); String path = Utils.joinPath(user.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.User, path, user, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in replacing a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } public Mono<ResourceResponse<User>> deleteUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deleteUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> deleteUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Deleting a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in deleting a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<User>> readUser(String userLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readUserInternal(userLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<User>> readUserInternal(String userLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } logger.debug("Reading a User. 
userLink [{}]", userLink); String path = Utils.joinPath(userLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.User, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.User, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, User.class)); } catch (Exception e) { logger.debug("Failure in reading a User due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<User>> readUsers(String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.User, User.class, Utils.joinPath(databaseLink, Paths.USERS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, String query, QueryFeedOperationState state) { return queryUsers(databaseLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<User>> queryUsers(String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, User.class, ResourceType.User); } @Override public Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKey(String clientEncryptionKeyLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readClientEncryptionKeyInternal(clientEncryptionKeyLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<ClientEncryptionKey>> readClientEncryptionKeyInternal(String clientEncryptionKeyLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if 
(StringUtils.isEmpty(clientEncryptionKeyLink)) {
            throw new IllegalArgumentException("clientEncryptionKeyLink");
        }
        // Tail of readClientEncryptionKeyInternal: build the Read request and execute it.
        logger.debug("Reading a client encryption key. clientEncryptionKeyLink [{}]", clientEncryptionKeyLink);

        String path = Utils.joinPath(clientEncryptionKeyLink, null);
        Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Read);
        RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this,
            OperationType.Read, ResourceType.ClientEncryptionKey, path, requestHeaders, options);
        if (retryPolicyInstance != null) {
            retryPolicyInstance.onBeforeSendRequest(request);
        }
        return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in reading a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

/** Creates a client encryption key under the given database. */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKey(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> createClientEncryptionKeyInternal(databaseLink, clientEncryptionKey, options, retryPolicyInstance), retryPolicyInstance);
}

// Builds and executes the Create request for a client encryption key.
private Mono<ResourceResponse<ClientEncryptionKey>> createClientEncryptionKeyInternal(String databaseLink,
    ClientEncryptionKey clientEncryptionKey, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) {
    try {
        logger.debug("Creating a client encryption key. databaseLink [{}], clientEncryptionKey id [{}]", databaseLink, clientEncryptionKey.getId());
        RxDocumentServiceRequest request = getClientEncryptionKeyRequest(databaseLink, clientEncryptionKey, options, OperationType.Create);
        return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, ClientEncryptionKey.class));
    } catch (Exception e) {
        logger.debug("Failure in creating a client encryption key due to [{}]", e.getMessage(), e);
        return Mono.error(e);
    }
}

// Shared validation + request construction for client-encryption-key operations.
private RxDocumentServiceRequest getClientEncryptionKeyRequest(String databaseLink, ClientEncryptionKey clientEncryptionKey,
    RequestOptions options, OperationType operationType) {
    if (StringUtils.isEmpty(databaseLink)) {
        throw new IllegalArgumentException("databaseLink");
    }
    if (clientEncryptionKey == null) {
        throw new IllegalArgumentException("clientEncryptionKey");
    }
    RxDocumentClientImpl.validateResource(clientEncryptionKey);

    String path = Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT);
    Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, operationType);
    return RxDocumentServiceRequest.create(this, operationType, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options);
}

/** Replaces an existing client encryption key (addressed by its name-based link). */
@Override
public Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKey(ClientEncryptionKey clientEncryptionKey,
    String nameBasedLink, RequestOptions options) {
    DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null);
    return ObservableHelper.inlineIfPossibleAsObs(() -> replaceClientEncryptionKeyInternal(clientEncryptionKey, nameBasedLink, options, retryPolicyInstance), retryPolicyInstance);
}

// Signature continues on the next chunk line.
private Mono<ResourceResponse<ClientEncryptionKey>> replaceClientEncryptionKeyInternal(ClientEncryptionKey clientEncryptionKey,
    String nameBasedLink, RequestOptions options, DocumentClientRetryPolicy
retryPolicyInstance) { try { if (clientEncryptionKey == null) { throw new IllegalArgumentException("clientEncryptionKey"); } logger.debug("Replacing a clientEncryptionKey. clientEncryptionKey id [{}]", clientEncryptionKey.getId()); RxDocumentClientImpl.validateResource(clientEncryptionKey); String path = Utils.joinPath(nameBasedLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.ClientEncryptionKey, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.ClientEncryptionKey, path, clientEncryptionKey, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, ClientEncryptionKey.class)); } catch (Exception e) { logger.debug("Failure in replacing a clientEncryptionKey due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<ClientEncryptionKey>> readClientEncryptionKeys( String databaseLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(databaseLink)) { throw new IllegalArgumentException("databaseLink"); } return nonDocumentReadFeed(state, ResourceType.ClientEncryptionKey, ClientEncryptionKey.class, Utils.joinPath(databaseLink, Paths.CLIENT_ENCRYPTION_KEY_PATH_SEGMENT)); } @Override public Flux<FeedResponse<ClientEncryptionKey>> queryClientEncryptionKeys( String databaseLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(databaseLink, querySpec, state, ClientEncryptionKey.class, ResourceType.ClientEncryptionKey); } @Override public Mono<ResourceResponse<Permission>> createPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> 
createPermissionInternal(userLink, permission, options, documentClientRetryPolicy), this.resetSessionTokenRetryPolicy.getRequestPolicy(null)); } private Mono<ResourceResponse<Permission>> createPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Creating a Permission. userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Create); return this.create(request, documentClientRetryPolicy, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in creating a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> upsertPermission(String userLink, Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> upsertPermissionInternal(userLink, permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> upsertPermissionInternal(String userLink, Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { logger.debug("Upserting a Permission. 
userLink [{}], permission id [{}]", userLink, permission.getId()); RxDocumentServiceRequest request = getPermissionRequest(userLink, permission, options, OperationType.Upsert); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.upsert(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in upserting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } private RxDocumentServiceRequest getPermissionRequest(String userLink, Permission permission, RequestOptions options, OperationType operationType) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } if (permission == null) { throw new IllegalArgumentException("permission"); } RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, operationType); return RxDocumentServiceRequest.create(this, operationType, ResourceType.Permission, path, permission, requestHeaders, options); } @Override public Mono<ResourceResponse<Permission>> replacePermission(Permission permission, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replacePermissionInternal(permission, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> replacePermissionInternal(Permission permission, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (permission == null) { throw new IllegalArgumentException("permission"); } logger.debug("Replacing a Permission. 
permission id [{}]", permission.getId()); RxDocumentClientImpl.validateResource(permission); String path = Utils.joinPath(permission.getSelfLink(), null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Replace); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Permission, path, permission, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.replace(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in replacing a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> deletePermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> deletePermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> deletePermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Deleting a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Delete); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Delete, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.delete(request, retryPolicyInstance, getOperationContextAndListenerTuple(options)).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in deleting a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Permission>> readPermission(String permissionLink, RequestOptions options) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readPermissionInternal(permissionLink, options, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Permission>> readPermissionInternal(String permissionLink, RequestOptions options, DocumentClientRetryPolicy retryPolicyInstance ) { try { if (StringUtils.isEmpty(permissionLink)) { throw new IllegalArgumentException("permissionLink"); } logger.debug("Reading a Permission. 
permissionLink [{}]", permissionLink); String path = Utils.joinPath(permissionLink, null); Map<String, String> requestHeaders = getRequestHeaders(options, ResourceType.Permission, OperationType.Read); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Permission, path, requestHeaders, options); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Permission.class)); } catch (Exception e) { logger.debug("Failure in reading a Permission due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Permission>> readPermissions(String userLink, QueryFeedOperationState state) { if (StringUtils.isEmpty(userLink)) { throw new IllegalArgumentException("userLink"); } return nonDocumentReadFeed(state, ResourceType.Permission, Permission.class, Utils.joinPath(userLink, Paths.PERMISSIONS_PATH_SEGMENT)); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, String query, QueryFeedOperationState state) { return queryPermissions(userLink, new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Permission>> queryPermissions(String userLink, SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(userLink, querySpec, state, Permission.class, ResourceType.Permission); } @Override public Mono<ResourceResponse<Offer>> replaceOffer(Offer offer) { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> replaceOfferInternal(offer, documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<ResourceResponse<Offer>> replaceOfferInternal(Offer offer, DocumentClientRetryPolicy documentClientRetryPolicy) { try { if (offer == null) { throw new IllegalArgumentException("offer"); } logger.debug("Replacing 
an Offer. offer id [{}]", offer.getId()); RxDocumentClientImpl.validateResource(offer); String path = Utils.joinPath(offer.getSelfLink(), null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Replace, ResourceType.Offer, path, offer, null, null); return this.replace(request, documentClientRetryPolicy).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in replacing an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Mono<ResourceResponse<Offer>> readOffer(String offerLink) { DocumentClientRetryPolicy retryPolicyInstance = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> readOfferInternal(offerLink, retryPolicyInstance), retryPolicyInstance); } private Mono<ResourceResponse<Offer>> readOfferInternal(String offerLink, DocumentClientRetryPolicy retryPolicyInstance) { try { if (StringUtils.isEmpty(offerLink)) { throw new IllegalArgumentException("offerLink"); } logger.debug("Reading an Offer. 
offerLink [{}]", offerLink); String path = Utils.joinPath(offerLink, null); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.Offer, path, (HashMap<String, String>)null, null); if (retryPolicyInstance != null) { retryPolicyInstance.onBeforeSendRequest(request); } return this.read(request, retryPolicyInstance).map(response -> toResourceResponse(response, Offer.class)); } catch (Exception e) { logger.debug("Failure in reading an Offer due to [{}]", e.getMessage(), e); return Mono.error(e); } } @Override public Flux<FeedResponse<Offer>> readOffers(QueryFeedOperationState state) { return nonDocumentReadFeed(state, ResourceType.Offer, Offer.class, Utils.joinPath(Paths.OFFERS_PATH_SEGMENT, null)); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( QueryFeedOperationState state, ResourceType resourceType, Class<T> klass, String resourceLink) { return nonDocumentReadFeed(state.getQueryOptions(), resourceType, klass, resourceLink); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeed( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink) { DocumentClientRetryPolicy retryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.fluxInlineIfPossibleAsObs( () -> nonDocumentReadFeedInternal(options, resourceType, klass, resourceLink, retryPolicy), retryPolicy); } private <T> Flux<FeedResponse<T>> nonDocumentReadFeedInternal( CosmosQueryRequestOptions options, ResourceType resourceType, Class<T> klass, String resourceLink, DocumentClientRetryPolicy retryPolicy) { final CosmosQueryRequestOptions nonNullOptions = options != null ? options : new CosmosQueryRequestOptions(); Integer maxItemCount = ModelBridgeInternal.getMaxItemCountFromQueryRequestOptions(nonNullOptions); int maxPageSize = maxItemCount != null ? 
maxItemCount : -1; assert(resourceType != ResourceType.Document); BiFunction<String, Integer, RxDocumentServiceRequest> createRequestFunc = (continuationToken, pageSize) -> { Map<String, String> requestHeaders = new HashMap<>(); if (continuationToken != null) { requestHeaders.put(HttpConstants.HttpHeaders.CONTINUATION, continuationToken); } requestHeaders.put(HttpConstants.HttpHeaders.PAGE_SIZE, Integer.toString(pageSize)); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.ReadFeed, resourceType, resourceLink, requestHeaders, nonNullOptions); retryPolicy.onBeforeSendRequest(request); return request; }; Function<RxDocumentServiceRequest, Mono<FeedResponse<T>>> executeFunc = request -> readFeed(request) .map(response -> feedResponseAccessor.createFeedResponse( response, CosmosItemSerializer.DEFAULT_SERIALIZER, klass)); return Paginator .getPaginatedQueryResultAsObservable( nonNullOptions, createRequestFunc, executeFunc, maxPageSize); } @Override public Flux<FeedResponse<Offer>> queryOffers(String query, QueryFeedOperationState state) { return queryOffers(new SqlQuerySpec(query), state); } @Override public Flux<FeedResponse<Offer>> queryOffers(SqlQuerySpec querySpec, QueryFeedOperationState state) { return createQuery(null, querySpec, state, Offer.class, ResourceType.Offer); } @Override public Mono<DatabaseAccount> getDatabaseAccount() { DocumentClientRetryPolicy documentClientRetryPolicy = this.resetSessionTokenRetryPolicy.getRequestPolicy(null); return ObservableHelper.inlineIfPossibleAsObs(() -> getDatabaseAccountInternal(documentClientRetryPolicy), documentClientRetryPolicy); } private Mono<DatabaseAccount> getDatabaseAccountInternal(DocumentClientRetryPolicy documentClientRetryPolicy) { try { logger.debug("Getting Database Account"); RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", (HashMap<String, String>) null, null); return this.read(request, 
documentClientRetryPolicy).map(ModelBridgeInternal::toDatabaseAccount); } catch (Exception e) { logger.debug("Failure in getting Database Account due to [{}]", e.getMessage(), e); return Mono.error(e); } } public ISessionContainer getSession() { return this.sessionContainer; } public void setSession(ISessionContainer sessionContainer) { this.sessionContainer = sessionContainer; } @Override public RxClientCollectionCache getCollectionCache() { return this.collectionCache; } @Override public RxPartitionKeyRangeCache getPartitionKeyRangeCache() { return partitionKeyRangeCache; } @Override public GlobalEndpointManager getGlobalEndpointManager() { return this.globalEndpointManager; } @Override public AddressSelector getAddressSelector() { return new AddressSelector(this.addressResolver, this.configs.getProtocol()); } public Flux<DatabaseAccount> getDatabaseAccountFromEndpoint(URI endpoint) { return Flux.defer(() -> { RxDocumentServiceRequest request = RxDocumentServiceRequest.create(this, OperationType.Read, ResourceType.DatabaseAccount, "", null, (Object) null); return this.populateHeadersAsync(request, RequestVerb.GET) .flatMap(requestPopulated -> { requestPopulated.setEndpointOverride(endpoint); return this.gatewayProxy.processMessage(requestPopulated).doOnError(e -> { String message = String.format("Failed to retrieve database account information. %s", e.getCause() != null ? e.getCause().toString() : e.toString()); logger.warn(message); }).map(rsp -> rsp.getResource(DatabaseAccount.class)) .doOnNext(databaseAccount -> this.useMultipleWriteLocations = this.connectionPolicy.isMultipleWriteRegionsEnabled() && BridgeInternal.isEnableMultipleWriteLocations(databaseAccount)); }); }); } /** * Certain requests must be routed through gateway even when the client connectivity mode is direct. 
* * @param request * @return RxStoreModel */ private RxStoreModel getStoreProxy(RxDocumentServiceRequest request) { if (request.useGatewayMode) { return this.gatewayProxy; } ResourceType resourceType = request.getResourceType(); OperationType operationType = request.getOperationType(); if (resourceType == ResourceType.Offer || resourceType == ResourceType.ClientEncryptionKey || resourceType.isScript() && operationType != OperationType.ExecuteJavaScript || resourceType == ResourceType.PartitionKeyRange || resourceType == ResourceType.PartitionKey && operationType == OperationType.Delete) { return this.gatewayProxy; } if (operationType == OperationType.Create || operationType == OperationType.Upsert) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection || resourceType == ResourceType.Permission) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Delete) { if (resourceType == ResourceType.Database || resourceType == ResourceType.User || resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Replace) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else if (operationType == OperationType.Read) { if (resourceType == ResourceType.DocumentCollection) { return this.gatewayProxy; } else { return this.storeModel; } } else { if ((operationType == OperationType.Query || operationType == OperationType.SqlQuery || operationType == OperationType.ReadFeed) && Utils.isCollectionChild(request.getResourceType())) { if (request.getPartitionKeyRangeIdentity() == null && request.getHeaders().get(HttpConstants.HttpHeaders.PARTITION_KEY) == null) { return this.gatewayProxy; } } return this.storeModel; } } @Override public void close() { logger.info("Attempting to close client {}", 
this.clientId); if (!closed.getAndSet(true)) { activeClientsCnt.decrementAndGet(); logger.info("Shutting down ..."); logger.info("Closing Global Endpoint Manager ..."); LifeCycleUtils.closeQuietly(this.globalEndpointManager); logger.info("Closing StoreClientFactory ..."); LifeCycleUtils.closeQuietly(this.storeClientFactory); logger.info("Shutting down reactorHttpClient ..."); LifeCycleUtils.closeQuietly(this.reactorHttpClient); logger.info("Shutting down CpuMonitor ..."); CpuMemoryMonitor.unregister(this); if (this.throughputControlEnabled.get()) { logger.info("Closing ThroughputControlStore ..."); this.throughputControlStore.close(); } logger.info("Shutting down completed."); } else { logger.warn("Already shutdown!"); } } @Override public synchronized void enableThroughputControlGroup(ThroughputControlGroupInternal group, Mono<Integer> throughputQueryMono) { checkNotNull(group, "Throughput control group can not be null"); if (this.throughputControlEnabled.compareAndSet(false, true)) { this.throughputControlStore = new ThroughputControlStore( this.collectionCache, this.connectionPolicy.getConnectionMode(), this.partitionKeyRangeCache); if (ConnectionMode.DIRECT == this.connectionPolicy.getConnectionMode()) { this.storeModel.enableThroughputControl(throughputControlStore); } else { this.gatewayProxy.enableThroughputControl(throughputControlStore); } } this.throughputControlStore.enableThroughputControlGroup(group, throughputQueryMono); } @Override public Flux<Void> submitOpenConnectionTasksAndInitCaches(CosmosContainerProactiveInitConfig proactiveContainerInitConfig) { return this.storeModel.submitOpenConnectionTasksAndInitCaches(proactiveContainerInitConfig); } @Override public ConsistencyLevel getDefaultConsistencyLevelOfAccount() { return this.gatewayConfigurationReader.getDefaultConsistencyLevel(); } /*** * Configure fault injector provider. * * @param injectorProvider the fault injector provider. 
*/ @Override public void configureFaultInjectorProvider(IFaultInjectorProvider injectorProvider) { checkNotNull(injectorProvider, "Argument 'injectorProvider' can not be null"); if (this.connectionPolicy.getConnectionMode() == ConnectionMode.DIRECT) { this.storeModel.configureFaultInjectorProvider(injectorProvider, this.configs); this.addressResolver.configureFaultInjectorProvider(injectorProvider, this.configs); } this.gatewayProxy.configureFaultInjectorProvider(injectorProvider, this.configs); } @Override public void recordOpenConnectionsAndInitCachesCompleted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesCompleted(cosmosContainerIdentities); } @Override public void recordOpenConnectionsAndInitCachesStarted(List<CosmosContainerIdentity> cosmosContainerIdentities) { this.storeModel.recordOpenConnectionsAndInitCachesStarted(cosmosContainerIdentities); } @Override public String getMasterKeyOrResourceToken() { return this.masterKeyOrResourceToken; } private static SqlQuerySpec createLogicalPartitionScanQuerySpec( PartitionKey partitionKey, List<String> partitionKeySelectors) { StringBuilder queryStringBuilder = new StringBuilder(); List<SqlParameter> parameters = new ArrayList<>(); queryStringBuilder.append("SELECT * FROM c WHERE"); Object[] pkValues = ModelBridgeInternal.getPartitionKeyInternal(partitionKey).toObjectArray(); String pkParamNamePrefix = "@pkValue"; for (int i = 0; i < pkValues.length; i++) { StringBuilder subQueryStringBuilder = new StringBuilder(); String sqlParameterName = pkParamNamePrefix + i; if (i > 0) { subQueryStringBuilder.append(" AND "); } subQueryStringBuilder.append(" c"); subQueryStringBuilder.append(partitionKeySelectors.get(i)); subQueryStringBuilder.append((" = ")); subQueryStringBuilder.append(sqlParameterName); parameters.add(new SqlParameter(sqlParameterName, pkValues[i])); queryStringBuilder.append(subQueryStringBuilder); } return new 
SqlQuerySpec(queryStringBuilder.toString(), parameters); } @Override public Mono<List<FeedRange>> getFeedRanges(String collectionLink, boolean forceRefresh) { InvalidPartitionExceptionRetryPolicy invalidPartitionExceptionRetryPolicy = new InvalidPartitionExceptionRetryPolicy( this.collectionCache, null, collectionLink, new HashMap<>()); RxDocumentServiceRequest request = RxDocumentServiceRequest.create( this, OperationType.Query, ResourceType.Document, collectionLink, null); invalidPartitionExceptionRetryPolicy.onBeforeSendRequest(request); return ObservableHelper.inlineIfPossibleAsObs( () -> getFeedRangesInternal(request, collectionLink, forceRefresh), invalidPartitionExceptionRetryPolicy); } private Mono<List<FeedRange>> getFeedRangesInternal( RxDocumentServiceRequest request, String collectionLink, boolean forceRefresh) { logger.debug("getFeedRange collectionLink=[{}] - forceRefresh={}", collectionLink, forceRefresh); if (StringUtils.isEmpty(collectionLink)) { throw new IllegalArgumentException("collectionLink"); } Mono<Utils.ValueHolder<DocumentCollection>> collectionObs = collectionCache.resolveCollectionAsync(null, request); return collectionObs.flatMap(documentCollectionResourceResponse -> { final DocumentCollection collection = documentCollectionResourceResponse.v; if (collection == null) { return Mono.error(new IllegalStateException("Collection cannot be null")); } Mono<Utils.ValueHolder<List<PartitionKeyRange>>> valueHolderMono = partitionKeyRangeCache .tryGetOverlappingRangesAsync( BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), collection.getResourceId(), RANGE_INCLUDING_ALL_PARTITION_KEY_RANGES, forceRefresh, null); return valueHolderMono.map(partitionKeyRangeList -> toFeedRanges(partitionKeyRangeList, request)); }); } private static List<FeedRange> toFeedRanges( Utils.ValueHolder<List<PartitionKeyRange>> partitionKeyRangeListValueHolder, RxDocumentServiceRequest request) { final List<PartitionKeyRange> 
partitionKeyRangeList = partitionKeyRangeListValueHolder.v; if (partitionKeyRangeList == null) { request.forceNameCacheRefresh = true; throw new InvalidPartitionException(); } List<FeedRange> feedRanges = new ArrayList<>(); partitionKeyRangeList.forEach(pkRange -> feedRanges.add(toFeedRange(pkRange))); return feedRanges; } private static FeedRange toFeedRange(PartitionKeyRange pkRange) { return new FeedRangeEpkImpl(pkRange.toRange()); } /** * Creates a type 4 (pseudo randomly generated) UUID. * <p> * The {@link UUID} is generated using a non-cryptographically strong pseudo random number generator. * * @return A randomly generated {@link UUID}. */ public static UUID randomUuid() { return randomUuid(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); } static UUID randomUuid(long msb, long lsb) { msb &= 0xffffffffffff0fffL; msb |= 0x0000000000004000L; lsb &= 0x3fffffffffffffffL; lsb |= 0x8000000000000000L; return new UUID(msb, lsb); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled) { return wrapPointOperationWithAvailabilityStrategy( resourceType, operationType, callback, initialRequestOptions, idempotentWriteRetriesEnabled, this ); } private Mono<ResourceResponse<Document>> wrapPointOperationWithAvailabilityStrategy( ResourceType resourceType, OperationType operationType, DocumentPointOperation callback, RequestOptions initialRequestOptions, boolean idempotentWriteRetriesEnabled, DiagnosticsClientContext innerDiagnosticsFactory) { checkNotNull(resourceType, "Argument 'resourceType' must not be null."); checkNotNull(operationType, "Argument 'operationType' must not be null."); checkNotNull(callback, "Argument 'callback' must not be null."); final RequestOptions nonNullRequestOptions = initialRequestOptions != null ? 
initialRequestOptions : new RequestOptions(); checkArgument( resourceType == ResourceType.Document, "This method can only be used for document point operations."); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = getEndToEndOperationLatencyPolicyConfig(nonNullRequestOptions, resourceType, operationType); List<String> orderedApplicableRegionsForSpeculation = getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, idempotentWriteRetriesEnabled, nonNullRequestOptions); if (orderedApplicableRegionsForSpeculation.size() < 2) { return callback.apply(nonNullRequestOptions, endToEndPolicyConfig, innerDiagnosticsFactory); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientPointOperationResult>> monoList = new ArrayList<>(); final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(innerDiagnosticsFactory, false); orderedApplicableRegionsForSpeculation .forEach(region -> { RequestOptions clonedOptions = new RequestOptions(nonNullRequestOptions); if (monoList.isEmpty()) { Mono<NonTransientPointOperationResult> initialMonoAcrossAllRegions = callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory) .map(NonTransientPointOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientPointOperationResult( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedOptions.setExcludeRegions( getEffectiveExcludedRegionsForHedging( nonNullRequestOptions.getExcludeRegions(), orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientPointOperationResult> regionalCrossRegionRetryMono = 
callback.apply(clonedOptions, endToEndPolicyConfig, diagnosticsFactory) .map(NonTransientPointOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientPointOperationResult( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) .delaySubscription(delayForCrossRegionalRetry)); } else { monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { diagnosticsFactory.merge(nonNullRequestOptions); if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { CosmosException cosmosException = Utils.as(innerException, CosmosException.class); diagnosticsFactory.merge(nonNullRequestOptions); return cosmosException; } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( 
message, innerException ); } index++; } } diagnosticsFactory.merge(nonNullRequestOptions); return exception; }) .doOnCancel(() -> diagnosticsFactory.merge(nonNullRequestOptions)); } private static boolean isCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); return unwrappedException instanceof CosmosException; } private static boolean isNonTransientCosmosException(Throwable t) { final Throwable unwrappedException = Exceptions.unwrap(t); if (!(unwrappedException instanceof CosmosException)) { return false; } CosmosException cosmosException = Utils.as(unwrappedException, CosmosException.class); return isNonTransientResultForHedging( cosmosException.getStatusCode(), cosmosException.getSubStatusCode()); } private List<String> getEffectiveExcludedRegionsForHedging( List<String> initialExcludedRegions, List<String> applicableRegions, String currentRegion) { List<String> effectiveExcludedRegions = new ArrayList<>(); if (initialExcludedRegions != null) { effectiveExcludedRegions.addAll(initialExcludedRegions); } for (String applicableRegion: applicableRegions) { if (!applicableRegion.equals(currentRegion)) { effectiveExcludedRegions.add(applicableRegion); } } return effectiveExcludedRegions; } private static boolean isNonTransientResultForHedging(int statusCode, int subStatusCode) { if (statusCode < HttpConstants.StatusCodes.BADREQUEST) { return true; } if (statusCode == HttpConstants.StatusCodes.REQUEST_TIMEOUT && subStatusCode == HttpConstants.SubStatusCodes.CLIENT_OPERATION_TIMEOUT) { return true; } if (statusCode == HttpConstants.StatusCodes.BADREQUEST || statusCode == HttpConstants.StatusCodes.CONFLICT || statusCode == HttpConstants.StatusCodes.METHOD_NOT_ALLOWED || statusCode == HttpConstants.StatusCodes.PRECONDITION_FAILED || statusCode == HttpConstants.StatusCodes.REQUEST_ENTITY_TOO_LARGE || statusCode == HttpConstants.StatusCodes.UNAUTHORIZED) { return true; } if (statusCode == HttpConstants.StatusCodes.NOTFOUND && 
subStatusCode == HttpConstants.SubStatusCodes.UNKNOWN) { return true; } return false; } private DiagnosticsClientContext getEffectiveClientContext(DiagnosticsClientContext clientContextOverride) { if (clientContextOverride != null) { return clientContextOverride; } return this; } /** * Returns the applicable endpoints ordered by preference list if any * @param operationType - the operationT * @return the applicable endpoints ordered by preference list if any */ private List<URI> getApplicableEndPoints(OperationType operationType, List<String> excludedRegions) { if (operationType.isReadOnlyOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableReadEndpoints(excludedRegions)); } else if (operationType.isWriteOperation()) { return withoutNulls(this.globalEndpointManager.getApplicableWriteEndpoints(excludedRegions)); } return EMPTY_ENDPOINT_LIST; } private static List<URI> withoutNulls(List<URI> orderedEffectiveEndpointsList) { if (orderedEffectiveEndpointsList == null) { return EMPTY_ENDPOINT_LIST; } int i = 0; while (i < orderedEffectiveEndpointsList.size()) { if (orderedEffectiveEndpointsList.get(i) == null) { orderedEffectiveEndpointsList.remove(i); } else { i++; } } return orderedEffectiveEndpointsList; } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, RequestOptions options) { return getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, operationType, isIdempotentWriteRetriesEnabled, options.getExcludeRegions()); } private List<String> getApplicableRegionsForSpeculation( CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig, ResourceType resourceType, OperationType operationType, boolean isIdempotentWriteRetriesEnabled, List<String> excludedRegions) { if (endToEndPolicyConfig == null || !endToEndPolicyConfig.isEnabled()) { return EMPTY_REGION_LIST; 
} if (resourceType != ResourceType.Document) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !isIdempotentWriteRetriesEnabled) { return EMPTY_REGION_LIST; } if (operationType.isWriteOperation() && !this.globalEndpointManager.canUseMultipleWriteLocations()) { return EMPTY_REGION_LIST; } if (!(endToEndPolicyConfig.getAvailabilityStrategy() instanceof ThresholdBasedAvailabilityStrategy)) { return EMPTY_REGION_LIST; } List<URI> endpoints = getApplicableEndPoints(operationType, excludedRegions); HashSet<String> normalizedExcludedRegions = new HashSet<>(); if (excludedRegions != null) { excludedRegions.forEach(r -> normalizedExcludedRegions.add(r.toLowerCase(Locale.ROOT))); } List<String> orderedRegionsForSpeculation = new ArrayList<>(); endpoints.forEach(uri -> { String regionName = this.globalEndpointManager.getRegionName(uri, operationType); if (!normalizedExcludedRegions.contains(regionName.toLowerCase(Locale.ROOT))) { orderedRegionsForSpeculation.add(regionName); } }); return orderedRegionsForSpeculation; } private <T> Mono<T> executeFeedOperationWithAvailabilityStrategy( final ResourceType resourceType, final OperationType operationType, final Supplier<DocumentClientRetryPolicy> retryPolicyFactory, final RxDocumentServiceRequest req, final BiFunction<Supplier<DocumentClientRetryPolicy>, RxDocumentServiceRequest, Mono<T>> feedOperation ) { checkNotNull(retryPolicyFactory, "Argument 'retryPolicyFactory' must not be null."); checkNotNull(req, "Argument 'req' must not be null."); assert(resourceType == ResourceType.Document); CosmosEndToEndOperationLatencyPolicyConfig endToEndPolicyConfig = this.getEffectiveEndToEndOperationLatencyPolicyConfig( req.requestContext.getEndToEndOperationLatencyPolicyConfig(), resourceType, operationType); List<String> initialExcludedRegions = req.requestContext.getExcludeRegions(); List<String> orderedApplicableRegionsForSpeculation = this.getApplicableRegionsForSpeculation( endToEndPolicyConfig, resourceType, 
operationType, false, initialExcludedRegions ); if (orderedApplicableRegionsForSpeculation.size() < 2) { return feedOperation.apply(retryPolicyFactory, req); } ThresholdBasedAvailabilityStrategy availabilityStrategy = (ThresholdBasedAvailabilityStrategy)endToEndPolicyConfig.getAvailabilityStrategy(); List<Mono<NonTransientFeedOperationResult<T>>> monoList = new ArrayList<>(); orderedApplicableRegionsForSpeculation .forEach(region -> { RxDocumentServiceRequest clonedRequest = req.clone(); if (monoList.isEmpty()) { Mono<NonTransientFeedOperationResult<T>> initialMonoAcrossAllRegions = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); if (logger.isDebugEnabled()) { monoList.add(initialMonoAcrossAllRegions.doOnSubscribe(c -> logger.debug( "STARTING to process {} operation in region '{}'", operationType, region))); } else { monoList.add(initialMonoAcrossAllRegions); } } else { clonedRequest.requestContext.setExcludeRegions( getEffectiveExcludedRegionsForHedging( initialExcludedRegions, orderedApplicableRegionsForSpeculation, region) ); Mono<NonTransientFeedOperationResult<T>> regionalCrossRegionRetryMono = feedOperation.apply(retryPolicyFactory, clonedRequest) .map(NonTransientFeedOperationResult::new) .onErrorResume( RxDocumentClientImpl::isNonTransientCosmosException, t -> Mono.just( new NonTransientFeedOperationResult<>( Utils.as(Exceptions.unwrap(t), CosmosException.class)))); Duration delayForCrossRegionalRetry = (availabilityStrategy) .getThreshold() .plus((availabilityStrategy) .getThresholdStep() .multipliedBy(monoList.size() - 1)); if (logger.isDebugEnabled()) { monoList.add( regionalCrossRegionRetryMono .doOnSubscribe(c -> logger.debug("STARTING to process {} operation in region '{}'", operationType, region)) 
.delaySubscription(delayForCrossRegionalRetry)); } else { monoList.add( regionalCrossRegionRetryMono .delaySubscription(delayForCrossRegionalRetry)); } } }); return Mono .firstWithValue(monoList) .flatMap(nonTransientResult -> { if (nonTransientResult.isError()) { return Mono.error(nonTransientResult.exception); } return Mono.just(nonTransientResult.response); }) .onErrorMap(throwable -> { Throwable exception = Exceptions.unwrap(throwable); if (exception instanceof NoSuchElementException) { List<Throwable> innerThrowables = Exceptions .unwrapMultiple(exception.getCause()); int index = 0; for (Throwable innerThrowable : innerThrowables) { Throwable innerException = Exceptions.unwrap(innerThrowable); if (innerException instanceof CosmosException) { return Utils.as(innerException, CosmosException.class); } else if (innerException instanceof NoSuchElementException) { logger.trace( "Operation in {} completed with empty result because it was cancelled.", orderedApplicableRegionsForSpeculation.get(index)); } else if (logger.isWarnEnabled()) { String message = "Unexpected Non-CosmosException when processing operation in '" + orderedApplicableRegionsForSpeculation.get(index) + "'."; logger.warn( message, innerException ); } index++; } } return exception; }); } @FunctionalInterface private interface DocumentPointOperation { Mono<ResourceResponse<Document>> apply(RequestOptions requestOptions, CosmosEndToEndOperationLatencyPolicyConfig endToEndOperationLatencyPolicyConfig, DiagnosticsClientContext clientContextOverride); } private static class NonTransientPointOperationResult { private final ResourceResponse<Document> response; private final CosmosException exception; public NonTransientPointOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientPointOperationResult(ResourceResponse<Document> response) { checkNotNull(response, "Argument 'response' 
// NOTE(review): this record begins mid-definition — the fragment below closes a result-holder
// class whose opening lines are outside this chunk; it is preserved verbatim.
must not be null."); this.exception = null; this.response = response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public ResourceResponse<Document> getResponse() { return this.response; } }
// Holds either a successful feed response or a non-transient CosmosException — exactly one of
// the two is non-null, as enforced by the checkNotNull calls in the two constructors.
private static class NonTransientFeedOperationResult<T> { private final T response; private final CosmosException exception; public NonTransientFeedOperationResult(CosmosException exception) { checkNotNull(exception, "Argument 'exception' must not be null."); this.exception = exception; this.response = null; } public NonTransientFeedOperationResult(T response) { checkNotNull(response, "Argument 'response' must not be null."); this.exception = null; this.response = response; } public boolean isError() { return this.exception != null; } public CosmosException getException() { return this.exception; } public T getResponse() { return this.response; } }
// Decorating DiagnosticsClientContext that records every CosmosDiagnostics it creates
// (plus the most recently created one) so they can later be folded into a single
// CosmosDiagnosticsContext via merge(...) below.
private static class ScopedDiagnosticsFactory implements DiagnosticsClientContext { private final AtomicBoolean isMerged = new AtomicBoolean(false); private final DiagnosticsClientContext inner; private final ConcurrentLinkedQueue<CosmosDiagnostics> createdDiagnostics; private final boolean shouldCaptureAllFeedDiagnostics; private final AtomicReference<CosmosDiagnostics> mostRecentlyCreatedDiagnostics = new AtomicReference<>(null); public ScopedDiagnosticsFactory(DiagnosticsClientContext inner, boolean shouldCaptureAllFeedDiagnostics) { checkNotNull(inner, "Argument 'inner' must not be null."); this.inner = inner; this.createdDiagnostics = new ConcurrentLinkedQueue<>(); this.shouldCaptureAllFeedDiagnostics = shouldCaptureAllFeedDiagnostics; } @Override public DiagnosticsClientConfig getConfig() { return inner.getConfig(); } @Override public CosmosDiagnostics createDiagnostics() { CosmosDiagnostics diagnostics = inner.createDiagnostics(); createdDiagnostics.add(diagnostics); mostRecentlyCreatedDiagnostics.set(diagnostics); return diagnostics; } @Override
public String getUserAgent() { return inner.getUserAgent(); } @Override public CosmosDiagnostics getMostRecentlyCreatedDiagnostics() { return this.mostRecentlyCreatedDiagnostics.get(); }
// Convenience overload: pulls the diagnostics-context snapshot off the request options
// (when present) and delegates to merge(CosmosDiagnosticsContext).
public void merge(RequestOptions requestOptions) { CosmosDiagnosticsContext knownCtx = null; if (requestOptions != null) { CosmosDiagnosticsContext ctxSnapshot = requestOptions.getDiagnosticsContextSnapshot(); if (ctxSnapshot != null) { knownCtx = requestOptions.getDiagnosticsContextSnapshot(); } } merge(knownCtx); }
// One-shot merge, guarded by an isMerged CAS so repeated calls are no-ops. Picks the target
// context (the supplied one, else the first recorded diagnostics that carries one), then
// attaches every non-empty recorded diagnostics still lacking a context; feed diagnostics are
// additionally flagged as captured-in-paged-flux when shouldCaptureAllFeedDiagnostics is set.
public void merge(CosmosDiagnosticsContext knownCtx) { if (!isMerged.compareAndSet(false, true)) { return; } CosmosDiagnosticsContext ctx = null; if (knownCtx != null) { ctx = knownCtx; } else { for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() != null) { ctx = diagnostics.getDiagnosticsContext(); break; } } } if (ctx == null) { return; } for (CosmosDiagnostics diagnostics : this.createdDiagnostics) { if (diagnostics.getDiagnosticsContext() == null && diagnosticsAccessor.isNotEmpty(diagnostics)) { if (this.shouldCaptureAllFeedDiagnostics && diagnosticsAccessor.getFeedResponseDiagnostics(diagnostics) != null) { AtomicBoolean isCaptured = diagnosticsAccessor.isDiagnosticsCapturedInPagedFlux(diagnostics); if (isCaptured != null) { isCaptured.set(true); } } ctxAccessor.addDiagnostics(ctx, diagnostics); } } }
// Clears the recorded diagnostics and re-arms the merge guard so the factory can be reused.
public void reset() { this.createdDiagnostics.clear(); this.isMerged.set(false); } } }
```suggestion .setDescription("The temperature unit to use. Infer this from the user's location."); ```
/**
 * Builds the JSON-schema style parameter definition for the {@code getCurrentWeather}
 * function: a required string {@code location} ("city, state") and a required string
 * {@code unit} restricted to "celsius" or "fahrenheit".
 *
 * @return the parameters object serialized into the function definition sent to the service.
 */
private static FunctionParameters getCurrentWeatherFunctionParameters() {
    FunctionProperties location = new FunctionProperties()
        .setType("string")
        .setDescription("The city and state, e.g. San Francisco, CA");
    // Fixed grammar in the model-facing description: "the users location" -> "the user's
    // location" (this text is read by the model, so clarity matters).
    FunctionProperties unit = new FunctionProperties()
        .setType("string")
        .setEnumString(Arrays.asList("celsius", "fahrenheit"))
        .setDescription("The temperature unit to use. Infer this from the user's location.");
    Map<String, FunctionProperties> props = new HashMap<>();
    props.put("location", location);
    props.put("unit", unit);
    return new FunctionParameters()
        .setType("object")
        .setRequiredPropertyNames(Arrays.asList("location", "unit"))
        .setProperties(props);
}
.setDescription("The temperature unit to use. Infer this from the users location.");
/**
 * Describes the parameters accepted by the {@code getCurrentWeather} function as a
 * JSON-schema style object with two required string properties: {@code location}
 * (city and state) and {@code unit} (celsius or fahrenheit).
 *
 * @return the schema object attached to the function definition sent to the service.
 */
private static FunctionParameters getCurrentWeatherFunctionParameters() {
    Map<String, FunctionProperties> schemaProperties = new HashMap<>();
    schemaProperties.put("location",
        new FunctionProperties()
            .setType("string")
            .setDescription("The city and state, e.g. San Francisco, CA"));
    schemaProperties.put("unit",
        new FunctionProperties()
            .setType("string")
            .setEnumString(Arrays.asList("celsius", "fahrenheit"))
            .setDescription("The temperature unit to use. Infer this from the user's location."));
    return new FunctionParameters()
        .setType("object")
        .setRequiredPropertyNames(Arrays.asList("location", "unit"))
        .setProperties(schemaProperties);
}
// Sample: demonstrates Azure OpenAI chat completions with function calling — sends a user
// question, lets the model request the getCurrentWeather function, feeds the function result
// back into the conversation, and prints the model's final answer.
// NOTE(review): lines were flattened by extraction; tokens are preserved verbatim and only
// standalone comment lines were added.
class ChatCompletionsFunctionCall { /** * Runs the sample algorithm and demonstrates how to get chat completions using function call. * * @param args Unused. Arguments to the program. */ public static void main(String[] args) { String azureOpenaiKey = "{azure-open-ai-key}"; String endpoint = "{azure-open-ai-endpoint}"; String deploymentOrModelId = "{azure-open-ai-deployment-model-id}"; OpenAIClient client = new OpenAIClientBuilder() .endpoint(endpoint) .credential(new AzureKeyCredential(azureOpenaiKey)) .buildClient(); List<FunctionDefinition> functions = Arrays.asList( new FunctionDefinition("getCurrentWeather") .setDescription("Get the current weather") .setParameters(BinaryData.fromObject(getCurrentWeatherFunctionParameters())) ); List<ChatRequestMessage> chatRequestMessages = new ArrayList<>(); chatRequestMessages.add(new ChatRequestUserMessage("What should I wear in Boston depending on the weather?")); ChatCompletions chatCompletions = client.getChatCompletions(deploymentOrModelId, new ChatCompletionsOptions(chatRequestMessages) .setFunctionCall(FunctionCallConfig.AUTO) .setFunctions(functions)); chatRequestMessages = handleFunctionCallResponse(chatCompletions.getChoices(), chatRequestMessages); ChatCompletions chatCompletionsAnswer = client.getChatCompletions(deploymentOrModelId, new ChatCompletionsOptions(chatRequestMessages)); System.out.printf("Message: %s.%n", chatCompletionsAnswer.getChoices().get(0).getMessage().getContent()); }
// For each choice: if the model requested a function call, run the local function and append
// its result as a user message; otherwise replay the assistant message (including its
// function-call payload) into the history. Returns the augmented message list.
private static List<ChatRequestMessage> handleFunctionCallResponse(List<ChatChoice> choices, List<ChatRequestMessage> chatMessages) { for (ChatChoice choice : choices) { ChatResponseMessage choiceMessage = choice.getMessage(); FunctionCall functionCall = choiceMessage.getFunctionCall(); if (CompletionsFinishReason.FUNCTION_CALL.equals(choice.getFinishReason())) { System.out.printf("Function name: %s, arguments: %s.%n", functionCall.getName(), functionCall.getArguments()); WeatherLocation weatherLocation =
// Deserializes the model-provided JSON arguments into a WeatherLocation (defined below).
BinaryData.fromString(functionCall.getArguments()) .toObject(WeatherLocation.class); int currentWeather = getCurrentWeather(weatherLocation); chatMessages.add(new ChatRequestUserMessage(String.format("The weather in %s is %d degrees %s.", weatherLocation.getLocation(), currentWeather, weatherLocation.getUnit()))); } else { ChatRequestAssistantMessage messageHistory = new ChatRequestAssistantMessage(choiceMessage.getContent()); messageHistory.setFunctionCall(choiceMessage.getFunctionCall()); chatMessages.add(messageHistory); } } return chatMessages; }
// Stand-in for a real weather service; always reports 35 degrees.
private static int getCurrentWeather(WeatherLocation weatherLocation) { return 35; }
// JSON-bindable holder for the function-call arguments ("unit", "location").
private static class WeatherLocation { @JsonProperty(value = "unit") String unit; @JsonProperty(value = "location") String location; @JsonCreator WeatherLocation(@JsonProperty(value = "unit") String unit, @JsonProperty(value = "location") String location) { this.unit = unit; this.location = location; } public String getUnit() { return unit; } public String getLocation() { return location; } } }
class ChatCompletionsFunctionCall { /** * Runs the sample algorithm and demonstrates how to get chat completions using function call. * * @param args Unused. Arguments to the program. */ public static void main(String[] args) { String azureOpenaiKey = "{azure-open-ai-key}"; String endpoint = "{azure-open-ai-endpoint}"; String deploymentOrModelId = "{azure-open-ai-deployment-model-id}"; OpenAIClient client = new OpenAIClientBuilder() .endpoint(endpoint) .credential(new AzureKeyCredential(azureOpenaiKey)) .buildClient(); List<FunctionDefinition> functions = Arrays.asList( new FunctionDefinition("getCurrentWeather") .setDescription("Get the current weather") .setParameters(BinaryData.fromObject(getCurrentWeatherFunctionParameters())) ); List<ChatRequestMessage> chatRequestMessages = new ArrayList<>(); chatRequestMessages.add(new ChatRequestUserMessage("What should I wear in Boston depending on the weather?")); ChatCompletions chatCompletions = client.getChatCompletions(deploymentOrModelId, new ChatCompletionsOptions(chatRequestMessages) .setFunctionCall(FunctionCallConfig.AUTO) .setFunctions(functions)); chatRequestMessages = handleFunctionCallResponse(chatCompletions.getChoices(), chatRequestMessages); ChatCompletions chatCompletionsAnswer = client.getChatCompletions(deploymentOrModelId, new ChatCompletionsOptions(chatRequestMessages)); System.out.printf("Message: %s.%n", chatCompletionsAnswer.getChoices().get(0).getMessage().getContent()); } private static List<ChatRequestMessage> handleFunctionCallResponse(List<ChatChoice> choices, List<ChatRequestMessage> chatMessages) { for (ChatChoice choice : choices) { ChatResponseMessage choiceMessage = choice.getMessage(); FunctionCall functionCall = choiceMessage.getFunctionCall(); if (CompletionsFinishReason.FUNCTION_CALL.equals(choice.getFinishReason())) { System.out.printf("Function name: %s, arguments: %s.%n", functionCall.getName(), functionCall.getArguments()); WeatherLocation weatherLocation = 
BinaryData.fromString(functionCall.getArguments()) .toObject(WeatherLocation.class); int currentWeather = getCurrentWeather(weatherLocation); chatMessages.add(new ChatRequestUserMessage(String.format("The weather in %s is %d degrees %s.", weatherLocation.getLocation(), currentWeather, weatherLocation.getUnit()))); } else { ChatRequestAssistantMessage messageHistory = new ChatRequestAssistantMessage(choiceMessage.getContent()); messageHistory.setFunctionCall(choiceMessage.getFunctionCall()); chatMessages.add(messageHistory); } } return chatMessages; } private static int getCurrentWeather(WeatherLocation weatherLocation) { return 35; } private static class WeatherLocation { @JsonProperty(value = "unit") String unit; @JsonProperty(value = "location") String location; @JsonCreator WeatherLocation(@JsonProperty(value = "unit") String unit, @JsonProperty(value = "location") String location) { this.unit = unit; this.location = location; } public String getUnit() { return unit; } public String getLocation() { return location; } } }
Any reason the instantiation was split into separate lines?
/**
 * Synchronously logs the HTTP request, forwards the call down the pipeline, and logs the
 * response (or the failure). Short-circuits to a plain pass-through when logging is disabled.
 *
 * @param context the call context carrying the request and the "caller-method" data key.
 * @param next the next policy in the synchronous pipeline.
 * @return the (possibly logger-wrapped) response from the rest of the pipeline.
 */
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }

    final ClientLogger logger =
        getOrCreateMethodLogger((String) context.getData("caller-method").orElse(""));
    final long startNs = System.nanoTime();

    requestLogger.logRequestSync(logger, getRequestLoggingOptions(context));
    try {
        // Declared inside the try: the variable is only meaningful once processSync()
        // succeeds, so there is no need for a pre-initialized null outside the block.
        HttpResponse response = next.processSync();
        if (response != null) {
            response = responseLogger.logResponseSync(
                logger, getResponseLoggingOptions(response, startNs, context));
        }
        return response;
    } catch (RuntimeException e) {
        createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest())
            .log("HTTP FAILED", e);
        // Rethrow as-is: the failure is already logged above, so routing it through
        // logger.logExceptionAsWarning(e) would emit a duplicate warning.
        throw e;
    }
}
response = next.processSync();
/**
 * Synchronous pipeline hook: emits a request log, invokes the remainder of the pipeline,
 * then emits a response log (or a failure log before propagating the exception). Skips all
 * logging work entirely when the detail level is {@code NONE}.
 *
 * @param context the call context carrying the request and the "caller-method" data key.
 * @param next the next policy in the synchronous pipeline.
 * @return the (possibly logger-wrapped) response from the rest of the pipeline.
 */
public HttpResponse processSync(HttpPipelineCallContext context, HttpPipelineNextSyncPolicy next) {
    if (httpLogDetailLevel == HttpLogDetailLevel.NONE) {
        return next.processSync();
    }

    String callerMethod = (String) context.getData("caller-method").orElse("");
    final ClientLogger methodLogger = getOrCreateMethodLogger(callerMethod);
    final long callStartNanos = System.nanoTime();

    requestLogger.logRequestSync(methodLogger, getRequestLoggingOptions(context));
    try {
        HttpResponse httpResponse = next.processSync();
        // A null response bypasses response logging entirely.
        return httpResponse == null
            ? null
            : responseLogger.logResponseSync(
                methodLogger, getResponseLoggingOptions(httpResponse, callStartNanos, context));
    } catch (RuntimeException e) {
        createBasicLoggingContext(methodLogger, LogLevel.WARNING, context.getHttpRequest())
            .log("HTTP FAILED", e);
        throw e;
    }
}
// HTTP pipeline policy that logs requests and responses according to the configured
// HttpLogDetailLevel: method/URL (with non-allow-listed query parameters redacted),
// allow-listed headers (others redacted), and — when size and content type permit —
// bodies, optionally pretty-printed as JSON. Loggers are cached per caller method.
// NOTE(review): these lines were flattened by extraction; tokens are preserved verbatim and
// only standalone comment lines were added.
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper(); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private static final int LOGGER_CACHE_MAX_SIZE = 1000; private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>(); private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class); private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintBody; private final HttpRequestLogger requestLogger; private final HttpResponseLogger responseLogger; /** * Key for {@link Context} to pass request retry count metadata for logging. */ public static final String RETRY_COUNT_CONTEXT = "requestRetryCount"; private static final String REQUEST_LOG_MESSAGE = "HTTP request"; private static final String RESPONSE_LOG_MESSAGE = "HTTP response"; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configuration options. 
*/ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL; this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.prettyPrintBody = false; this.requestLogger = new DefaultHttpRequestLogger(); this.responseLogger = new DefaultHttpResponseLogger(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.prettyPrintBody = httpLogOptions.isPrettyPrintBody(); this.requestLogger = (httpLogOptions.getRequestLogger() == null) ? new DefaultHttpRequestLogger() : httpLogOptions.getRequestLogger(); this.responseLogger = (httpLogOptions.getResponseLogger() == null) ? 
// NOTE(review): partway through the next line an @Override annotation is immediately
// followed by a private method (createBasicLoggingContext) — the overriding method that
// belonged there (processSync) appears to have been removed during extraction; confirm
// against the original file.
new DefaultHttpResponseLogger() : httpLogOptions.getResponseLogger(); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return requestLogger.logRequest(logger, getRequestLoggingOptions(context)) .then(next.process()) .flatMap(response -> responseLogger.logResponse(logger, getResponseLoggingOptions(response, startNs, context))) .doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable)); } @Override private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) { LoggingEventBuilder log = logger.atLevel(level); if (LOGGER.canLogAtLevel(level) && request != null) { if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) { String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID); if (clientRequestId != null) { log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId); } } if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) { String traceparent = request.getHeaders().getValue(TRACEPARENT); if (traceparent != null) { log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent); } } } return log; } private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) { return new HttpRequestLoggingContext(callContext.getHttpRequest(), callContext.getContext(), getRequestRetryCount(callContext.getContext())); } private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs, HttpPipelineCallContext callContext) { return new HttpResponseLoggingContext(httpResponse, Duration.ofNanos(System.nanoTime() - startNs), 
// Below: the default request logger — logs method/URL/try-count, allow-listed headers, and
// the body when small enough and of a loggable content type.
callContext.getContext(), getRequestRetryCount(callContext.getContext())); } private final class DefaultHttpRequestLogger implements HttpRequestLogger { @Override public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) { final LogLevel logLevel = getLogLevel(loggingOptions); if (logger.canLogAtLevel(logLevel)) { log(logLevel, logger, loggingOptions); } return Mono.empty(); } @Override public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) { final LogLevel logLevel = getLogLevel(loggingOptions); if (logger.canLogAtLevel(logLevel)) { log(logLevel, logger, loggingOptions); } } private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) { final HttpRequest request = loggingOptions.getHttpRequest(); LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger); if (httpLogDetailLevel.shouldLogUrl()) { logBuilder .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod()) .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames)); Integer retryCount = loggingOptions.getTryCount(); if (retryCount != null) { logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount); } } if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) { addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder); } if (request.getBody() == null) { logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0) .log(REQUEST_LOG_MESSAGE); return; } String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE); long contentLength = getContentLength(logger, request.getHeaders()); logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength); if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) { logBody(request, (int) contentLength, logBuilder, logger, contentType); return; } logBuilder.log(REQUEST_LOG_MESSAGE); } } private void 
// logBody: materializes the request body for logging; in-memory contents are logged directly,
// stream-backed bodies are buffered (and the request body replaced) so the request can still
// be sent downstream, and reactive bodies are logged as a side effect of being consumed.
logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) { BinaryData data = request.getBodyAsBinaryData(); BinaryDataContent content = BinaryDataHelper.getContent(data); if (content instanceof StringContent || content instanceof ByteBufferContent || content instanceof SerializableContent || content instanceof ByteArrayContent) { logBody(logBuilder, logger, contentType, content.toString()); } else if (content instanceof InputStreamContent) { byte[] contentBytes = content.toBytes(); request.setBody(contentBytes); logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8)); } else { AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength); request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer() .doOnNext(byteBuffer -> { try { ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } }), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8)))); } } private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) { logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data)) .log(REQUEST_LOG_MESSAGE); } private final class DefaultHttpResponseLogger implements HttpResponseLogger { @Override public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) { final LogLevel logLevel = getLogLevel(loggingOptions); final HttpResponse response = loggingOptions.getHttpResponse(); if (!logger.canLogAtLevel(logLevel)) { return Mono.just(response); } LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger); logContentLength(response, logBuilder); logUrl(loggingOptions, response, logBuilder); logHeaders(logger, response, logBuilder); if (httpLogDetailLevel.shouldLogBody()) { String 
contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE); long contentLength = getContentLength(logger, response.getHeaders()); if (shouldBodyBeLogged(contentTypeHeader, contentLength)) { return Mono.just(new LoggingHttpResponse(response, logBuilder, logger, (int) contentLength, contentTypeHeader, prettyPrintBody)); } } logBuilder.log(RESPONSE_LOG_MESSAGE); return Mono.just(response); } private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) { if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) { addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder); } } private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response, LoggingEventBuilder logBuilder) { if (httpLogDetailLevel.shouldLogUrl()) { logBuilder .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode()) .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames)) .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis()); } } private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) { String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH); if (!CoreUtils.isNullOrEmpty(contentLengthString)) { logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString); } } @Override public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) { final LogLevel logLevel = getLogLevel(loggingOptions); final HttpResponse response = loggingOptions.getHttpResponse(); if (!logger.canLogAtLevel(logLevel)) { return response; } LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger); logContentLength(response, logBuilder); logUrl(loggingOptions, response, logBuilder); logHeaders(logger, response, logBuilder); if (httpLogDetailLevel.shouldLogBody()) { String contentTypeHeader = 
response.getHeaderValue(HttpHeaderName.CONTENT_TYPE); long contentLength = getContentLength(logger, response.getHeaders()); if (shouldBodyBeLogged(contentTypeHeader, contentLength)) { return new LoggingHttpResponse(response, logBuilder, logger, (int) contentLength, contentTypeHeader, prettyPrintBody); } } logBuilder.log(RESPONSE_LOG_MESSAGE); return response; } } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) { String query = url.getQuery(); if (CoreUtils.isNullOrEmpty(query)) { return url.toString(); } UrlBuilder urlBuilder = ImplUtils.parseUrl(url, false); CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> { if (allowedQueryParameterNames.contains(queryParam.getKey().toLowerCase(Locale.ROOT))) { urlBuilder.addQueryParameter(queryParam.getKey(), queryParam.getValue()); } else { urlBuilder.addQueryParameter(queryParam.getKey(), REDACTED_PLACEHOLDER); } }); return urlBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. * @param logLevel Log level the environment is configured to use. */ private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers, LoggingEventBuilder logBuilder) { for (HttpHeader header : headers) { String headerName = header.getName(); logBuilder.addKeyValue(headerName, allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT)) ? header.getValue() : REDACTED_PLACEHOLDER); } } /* * Determines and attempts to pretty print the body if it is JSON. 
* * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. */ private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType, String body) { String result = body; if (prettyPrintBody && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON", e); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. * @return */ private static long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", contentLengthString, e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. 
* @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } /* * Gets the request retry count to include in logging. * * If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be * logged. */ private static Integer getRequestRetryCount(Context context) { Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null); if (rawRetryCount == null) { return null; } try { return Integer.valueOf(rawRetryCount.toString()); } catch (NumberFormatException ex) { LOGGER.atWarning() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount) .log("Could not parse the request retry count."); return null; } } /* * Get or create the ClientLogger for the method having its request and response logged. 
*/ private static ClientLogger getOrCreateMethodLogger(String methodName) { if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) { CALLER_METHOD_LOGGER_CACHE.clear(); } return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new); } private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) { switch (logLevel) { case ERROR: return logger.atError(); case WARNING: return logger.atWarning(); case INFORMATIONAL: return logger.atInfo(); case VERBOSE: default: return logger.atVerbose(); } } private static final class LoggingHttpResponse extends HttpResponse { private final HttpResponse actualResponse; private final LoggingEventBuilder logBuilder; private final int contentLength; private final ClientLogger logger; private final boolean prettyPrintBody; private final String contentTypeHeader; private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder, ClientLogger logger, int contentLength, String contentTypeHeader, boolean prettyPrintBody) { super(actualResponse.getRequest()); this.actualResponse = actualResponse; this.logBuilder = logBuilder; this.logger = logger; this.contentLength = contentLength; this.contentTypeHeader = contentTypeHeader; this.prettyPrintBody = prettyPrintBody; } @Override public int getStatusCode() { return actualResponse.getStatusCode(); } @Override @Deprecated public String getHeaderValue(String name) { return actualResponse.getHeaderValue(name); } @Override public String getHeaderValue(HttpHeaderName headerName) { return actualResponse.getHeaderValue(headerName); } @Override public HttpHeaders getHeaders() { return actualResponse.getHeaders(); } @Override public Flux<ByteBuffer> getBody() { AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength); return Flux.using(() -> stream, s -> actualResponse.getBody() .doOnNext(byteBuffer -> { try { ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s); } catch (IOException ex) 
{ throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } }), s -> doLog(s.toString(StandardCharsets.UTF_8))); } @Override public Mono<byte[]> getBodyAsByteArray() { return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders()); } @Override public Mono<String> getBodyAsString() { return getBodyAsByteArray().map(String::new); } @Override public Mono<String> getBodyAsString(Charset charset) { return getBodyAsByteArray().map(bytes -> new String(bytes, charset)); } @Override public BinaryData getBodyAsBinaryData() { BinaryData content = actualResponse.getBodyAsBinaryData(); doLog(content.toString()); return content; } @Override public void close() { actualResponse.close(); } private void doLog(String body) { logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body)) .log(RESPONSE_LOG_MESSAGE); } } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapperShim PRETTY_PRINTER = ObjectMapperShim.createPrettyPrintMapper(); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private static final int LOGGER_CACHE_MAX_SIZE = 1000; private static final Map<String, ClientLogger> CALLER_METHOD_LOGGER_CACHE = new ConcurrentHashMap<>(); private static final ClientLogger LOGGER = new ClientLogger(HttpLoggingPolicy.class); private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintBody; private final HttpRequestLogger requestLogger; private final HttpResponseLogger responseLogger; /** * Key for {@link Context} to pass request retry count metadata for logging. */ public static final String RETRY_COUNT_CONTEXT = "requestRetryCount"; private static final String REQUEST_LOG_MESSAGE = "HTTP request"; private static final String RESPONSE_LOG_MESSAGE = "HTTP response"; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configuration options. 
*/ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.ENVIRONMENT_HTTP_LOG_DETAIL_LEVEL; this.allowedHeaderNames = HttpLogOptions.DEFAULT_HEADERS_ALLOWLIST .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = HttpLogOptions.DEFAULT_QUERY_PARAMS_ALLOWLIST .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.prettyPrintBody = false; this.requestLogger = new DefaultHttpRequestLogger(); this.responseLogger = new DefaultHttpResponseLogger(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.prettyPrintBody = httpLogOptions.isPrettyPrintBody(); this.requestLogger = (httpLogOptions.getRequestLogger() == null) ? new DefaultHttpRequestLogger() : httpLogOptions.getRequestLogger(); this.responseLogger = (httpLogOptions.getResponseLogger() == null) ? 
new DefaultHttpResponseLogger() : httpLogOptions.getResponseLogger(); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = getOrCreateMethodLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return requestLogger.logRequest(logger, getRequestLoggingOptions(context)) .then(next.process()) .flatMap(response -> responseLogger.logResponse(logger, getResponseLoggingOptions(response, startNs, context))) .doOnError(throwable -> createBasicLoggingContext(logger, LogLevel.WARNING, context.getHttpRequest()).log("HTTP FAILED", throwable)); } @Override private LoggingEventBuilder createBasicLoggingContext(ClientLogger logger, LogLevel level, HttpRequest request) { LoggingEventBuilder log = logger.atLevel(level); if (LOGGER.canLogAtLevel(level) && request != null) { if (allowedHeaderNames.contains(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName())) { String clientRequestId = request.getHeaders().getValue(X_MS_CLIENT_REQUEST_ID); if (clientRequestId != null) { log.addKeyValue(X_MS_CLIENT_REQUEST_ID.getCaseInsensitiveName(), clientRequestId); } } if (allowedHeaderNames.contains(TRACEPARENT.getCaseInsensitiveName())) { String traceparent = request.getHeaders().getValue(TRACEPARENT); if (traceparent != null) { log.addKeyValue(TRACEPARENT.getCaseInsensitiveName(), traceparent); } } } return log; } private HttpRequestLoggingContext getRequestLoggingOptions(HttpPipelineCallContext callContext) { return new HttpRequestLoggingContext(callContext.getHttpRequest(), callContext.getContext(), getRequestRetryCount(callContext.getContext())); } private HttpResponseLoggingContext getResponseLoggingOptions(HttpResponse httpResponse, long startNs, HttpPipelineCallContext callContext) { return new HttpResponseLoggingContext(httpResponse, Duration.ofNanos(System.nanoTime() - startNs), 
callContext.getContext(), getRequestRetryCount(callContext.getContext())); } private final class DefaultHttpRequestLogger implements HttpRequestLogger { @Override public Mono<Void> logRequest(ClientLogger logger, HttpRequestLoggingContext loggingOptions) { final LogLevel logLevel = getLogLevel(loggingOptions); if (logger.canLogAtLevel(logLevel)) { log(logLevel, logger, loggingOptions); } return Mono.empty(); } @Override public void logRequestSync(ClientLogger logger, HttpRequestLoggingContext loggingOptions) { final LogLevel logLevel = getLogLevel(loggingOptions); if (logger.canLogAtLevel(logLevel)) { log(logLevel, logger, loggingOptions); } } private void log(LogLevel logLevel, ClientLogger logger, HttpRequestLoggingContext loggingOptions) { final HttpRequest request = loggingOptions.getHttpRequest(); LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger); if (httpLogDetailLevel.shouldLogUrl()) { logBuilder .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, request.getHttpMethod()) .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(request.getUrl(), allowedQueryParameterNames)); Integer retryCount = loggingOptions.getTryCount(); if (retryCount != null) { logBuilder.addKeyValue(LoggingKeys.TRY_COUNT_KEY, retryCount); } } if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) { addHeadersToLogMessage(allowedHeaderNames, request.getHeaders(), logBuilder); } if (request.getBody() == null) { logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, 0) .log(REQUEST_LOG_MESSAGE); return; } String contentType = request.getHeaders().getValue(HttpHeaderName.CONTENT_TYPE); long contentLength = getContentLength(logger, request.getHeaders()); logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLength); if (httpLogDetailLevel.shouldLogBody() && shouldBodyBeLogged(contentType, contentLength)) { logBody(request, (int) contentLength, logBuilder, logger, contentType); return; } logBuilder.log(REQUEST_LOG_MESSAGE); } } private void 
logBody(HttpRequest request, int contentLength, LoggingEventBuilder logBuilder, ClientLogger logger, String contentType) { BinaryData data = request.getBodyAsBinaryData(); BinaryDataContent content = BinaryDataHelper.getContent(data); if (content instanceof StringContent || content instanceof ByteBufferContent || content instanceof SerializableContent || content instanceof ByteArrayContent) { logBody(logBuilder, logger, contentType, content.toString()); } else if (content instanceof InputStreamContent) { byte[] contentBytes = content.toBytes(); request.setBody(contentBytes); logBody(logBuilder, logger, contentType, new String(contentBytes, StandardCharsets.UTF_8)); } else { AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength); request.setBody(Flux.using(() -> stream, s -> content.toFluxByteBuffer() .doOnNext(byteBuffer -> { try { ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s); } catch (IOException ex) { throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } }), s -> logBody(logBuilder, logger, contentType, s.toString(StandardCharsets.UTF_8)))); } } private void logBody(LoggingEventBuilder logBuilder, ClientLogger logger, String contentType, String data) { logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody, contentType, data)) .log(REQUEST_LOG_MESSAGE); } private final class DefaultHttpResponseLogger implements HttpResponseLogger { @Override public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) { final LogLevel logLevel = getLogLevel(loggingOptions); final HttpResponse response = loggingOptions.getHttpResponse(); if (!logger.canLogAtLevel(logLevel)) { return Mono.just(response); } LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger); logContentLength(response, logBuilder); logUrl(loggingOptions, response, logBuilder); logHeaders(logger, response, logBuilder); if (httpLogDetailLevel.shouldLogBody()) { String 
contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE); long contentLength = getContentLength(logger, response.getHeaders()); if (shouldBodyBeLogged(contentTypeHeader, contentLength)) { return Mono.just(new LoggingHttpResponse(response, logBuilder, logger, (int) contentLength, contentTypeHeader, prettyPrintBody)); } } logBuilder.log(RESPONSE_LOG_MESSAGE); return Mono.just(response); } private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) { if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) { addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder); } } private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response, LoggingEventBuilder logBuilder) { if (httpLogDetailLevel.shouldLogUrl()) { logBuilder .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode()) .addKeyValue(LoggingKeys.URL_KEY, getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames)) .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis()); } } private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) { String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH); if (!CoreUtils.isNullOrEmpty(contentLengthString)) { logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString); } } @Override public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) { final LogLevel logLevel = getLogLevel(loggingOptions); final HttpResponse response = loggingOptions.getHttpResponse(); if (!logger.canLogAtLevel(logLevel)) { return response; } LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger); logContentLength(response, logBuilder); logUrl(loggingOptions, response, logBuilder); logHeaders(logger, response, logBuilder); if (httpLogDetailLevel.shouldLogBody()) { String contentTypeHeader = 
response.getHeaderValue(HttpHeaderName.CONTENT_TYPE); long contentLength = getContentLength(logger, response.getHeaders()); if (shouldBodyBeLogged(contentTypeHeader, contentLength)) { return new LoggingHttpResponse(response, logBuilder, logger, (int) contentLength, contentTypeHeader, prettyPrintBody); } } logBuilder.log(RESPONSE_LOG_MESSAGE); return response; } } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private static String getRedactedUrl(URL url, Set<String> allowedQueryParameterNames) { String query = url.getQuery(); if (CoreUtils.isNullOrEmpty(query)) { return url.toString(); } UrlBuilder urlBuilder = ImplUtils.parseUrl(url, false); CoreUtils.parseQueryParameters(query).forEachRemaining(queryParam -> { if (allowedQueryParameterNames.contains(queryParam.getKey().toLowerCase(Locale.ROOT))) { urlBuilder.addQueryParameter(queryParam.getKey(), queryParam.getValue()); } else { urlBuilder.addQueryParameter(queryParam.getKey(), REDACTED_PLACEHOLDER); } }); return urlBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. * @param logLevel Log level the environment is configured to use. */ private static void addHeadersToLogMessage(Set<String> allowedHeaderNames, HttpHeaders headers, LoggingEventBuilder logBuilder) { for (HttpHeader header : headers) { String headerName = header.getName(); logBuilder.addKeyValue(headerName, allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT)) ? header.getValue() : REDACTED_PLACEHOLDER); } } /* * Determines and attempts to pretty print the body if it is JSON. 
* * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. */ private static String prettyPrintIfNeeded(ClientLogger logger, boolean prettyPrintBody, String contentType, String body) { String result = body; if (prettyPrintBody && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.log(LogLevel.WARNING, () -> "Failed to pretty print JSON", e); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. * @return */ private static long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue(HttpHeaderName.CONTENT_LENGTH); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException e) { logger.log(LogLevel.INFORMATIONAL, () -> "Could not parse the HTTP header content-length: '" + contentLengthString + "'.", e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. 
* @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private static boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } /* * Gets the request retry count to include in logging. * * If there is no value set, or it isn't a valid number null will be returned indicating that retry count won't be * logged. */ private static Integer getRequestRetryCount(Context context) { Object rawRetryCount = context.getData(RETRY_COUNT_CONTEXT).orElse(null); if (rawRetryCount == null) { return null; } try { return Integer.valueOf(rawRetryCount.toString()); } catch (NumberFormatException ex) { LOGGER.atInfo() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, rawRetryCount) .log("Could not parse the request retry count."); return null; } } /* * Get or create the ClientLogger for the method having its request and response logged. 
*/ private static ClientLogger getOrCreateMethodLogger(String methodName) { if (CALLER_METHOD_LOGGER_CACHE.size() > LOGGER_CACHE_MAX_SIZE) { CALLER_METHOD_LOGGER_CACHE.clear(); } return CALLER_METHOD_LOGGER_CACHE.computeIfAbsent(methodName, ClientLogger::new); } private static LoggingEventBuilder getLogBuilder(LogLevel logLevel, ClientLogger logger) { switch (logLevel) { case ERROR: return logger.atError(); case WARNING: return logger.atWarning(); case INFORMATIONAL: return logger.atInfo(); case VERBOSE: default: return logger.atVerbose(); } } private static final class LoggingHttpResponse extends HttpResponse { private final HttpResponse actualResponse; private final LoggingEventBuilder logBuilder; private final int contentLength; private final ClientLogger logger; private final boolean prettyPrintBody; private final String contentTypeHeader; private LoggingHttpResponse(HttpResponse actualResponse, LoggingEventBuilder logBuilder, ClientLogger logger, int contentLength, String contentTypeHeader, boolean prettyPrintBody) { super(actualResponse.getRequest()); this.actualResponse = actualResponse; this.logBuilder = logBuilder; this.logger = logger; this.contentLength = contentLength; this.contentTypeHeader = contentTypeHeader; this.prettyPrintBody = prettyPrintBody; } @Override public int getStatusCode() { return actualResponse.getStatusCode(); } @Override @Deprecated public String getHeaderValue(String name) { return actualResponse.getHeaderValue(name); } @Override public String getHeaderValue(HttpHeaderName headerName) { return actualResponse.getHeaderValue(headerName); } @Override public HttpHeaders getHeaders() { return actualResponse.getHeaders(); } @Override public Flux<ByteBuffer> getBody() { AccessibleByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(contentLength); return Flux.using(() -> stream, s -> actualResponse.getBody() .doOnNext(byteBuffer -> { try { ImplUtils.writeByteBufferToStream(byteBuffer.duplicate(), s); } catch (IOException ex) 
{ throw LOGGER.logExceptionAsError(new UncheckedIOException(ex)); } }), s -> doLog(s.toString(StandardCharsets.UTF_8))); } @Override public Mono<byte[]> getBodyAsByteArray() { return FluxUtil.collectBytesFromNetworkResponse(getBody(), actualResponse.getHeaders()); } @Override public Mono<String> getBodyAsString() { return getBodyAsByteArray().map(String::new); } @Override public Mono<String> getBodyAsString(Charset charset) { return getBodyAsByteArray().map(bytes -> new String(bytes, charset)); } @Override public BinaryData getBodyAsBinaryData() { BinaryData content = actualResponse.getBodyAsBinaryData(); doLog(content.toString()); return content; } @Override public void close() { actualResponse.close(); } private void doLog(String body) { logBuilder.addKeyValue(LoggingKeys.BODY_KEY, prettyPrintIfNeeded(logger, prettyPrintBody, contentTypeHeader, body)) .log(RESPONSE_LOG_MESSAGE); } } }
Thoughts on replacing `doOnSuccess`, which is a side-effect operator, with a `map` call instead?
/**
 * Uploads the generated blob content and records its CRC checksum for later
 * verification by {@code checkMatch}.
 * <p>
 * May only be called once per instance: {@code dataChecksum} doubles as an
 * "already initialized" sentinel (-1 means setup has not completed yet).
 *
 * @param blobClient client used to upload the generated content.
 * @param blobSize number of bytes of content to generate and upload.
 * @return a {@link Mono} that completes once the upload finishes and the checksum has been recorded.
 * @throws IllegalStateException if setup already ran (a checksum is already recorded).
 */
public Mono<Void> setupBlob(BlobAsyncClient blobClient, long blobSize) {
    // -1 is the "never set up" sentinel; any other value means a previous call completed.
    if (dataChecksum != -1) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("setupBlob can't be called again"));
    }
    this.blobSize = blobSize;
    // Mono.using ties the CrcInputStream's lifetime to the subscription, so the
    // stream is closed whether the upload succeeds or errors.
    return Mono.using(
        () -> new CrcInputStream(BLOB_CONTENT_HEAD, blobSize),
        data -> blobClient
            .upload(BinaryData.fromStream(data, blobSize))
            .then(data.getContentInfo())
            // Side effect: capture the CRC computed while the upload streamed through.
            .doOnSuccess(info -> dataChecksum = info.getCrc()),
        CrcInputStream::close)
        .then();
}
.doOnSuccess(info -> dataChecksum = info.getCrc()),
/**
 * Uploads the generated blob content and records its CRC checksum for later
 * verification by {@code checkMatch}.
 * <p>
 * May only be called once per instance: {@code dataChecksum} doubles as an
 * "already initialized" sentinel (-1 means setup has not completed yet).
 *
 * @param blobClient client used to upload the generated content.
 * @param blobSize number of bytes of content to generate and upload.
 * @return a {@link Mono} that completes once the upload finishes and the checksum has been recorded.
 * @throws IllegalStateException if setup already ran (a checksum is already recorded).
 */
public Mono<Void> setupBlob(BlobAsyncClient blobClient, long blobSize) {
    // -1 is the "never set up" sentinel; any other value means a previous call completed.
    if (dataChecksum != -1) {
        throw LOGGER.logExceptionAsError(new IllegalStateException("setupBlob can't be called again"));
    }
    this.blobSize = blobSize;
    // Mono.using ties the CrcInputStream's lifetime to the subscription, so the
    // stream is closed whether the upload succeeds or errors.
    return Mono.using(
        () -> new CrcInputStream(BLOB_CONTENT_HEAD, blobSize),
        data -> blobClient
            .upload(BinaryData.fromStream(data, blobSize))
            .then(data.getContentInfo()),
        CrcInputStream::close)
        // doOnNext, not map: recording the checksum is a side effect. map(info ->
        // dataChecksum = info.getCrc()) relied on the value of an assignment
        // expression and boxed the long into a Mono<Long> that .then() discarded.
        .doOnNext(info -> dataChecksum = info.getCrc())
        .then();
}
/**
 * Holds the canonical ("original") blob content metadata — its CRC checksum and
 * size — so downloaded copies can be verified against what was uploaded.
 */
class OriginalContent {
    private final static ClientLogger LOGGER = new ClientLogger(OriginalContent.class);
    // Tracer is only used to re-activate the caller's span while logging a mismatch.
    private final static Tracer TRACER = TracerProvider.getDefaultProvider().createTracer("unused", null, null, null);
    // Fixed head of the generated blob content; the remainder is presumably produced
    // by CrcInputStream elsewhere — confirm against the stream implementation.
    private static final String BLOB_CONTENT_HEAD_STRING = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
        + "sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. "
        + "Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus. Massa sapien faucibus et molestie ac feugiat sed lectus. "
        + "Sed pulvinar proin gravida hendrerit.";
    private static final BinaryData BLOB_CONTENT_HEAD = BinaryData.fromString(BLOB_CONTENT_HEAD_STRING);
    // -1 doubles as the "setupBlob has not completed" sentinel.
    private long dataChecksum = -1;
    private long blobSize = 0;
    public OriginalContent() {
    }
    // Convenience overload: verify a BinaryData payload.
    public Mono<Boolean> checkMatch(BinaryData data, Context span) {
        return checkMatch(data.toFluxByteBuffer(), span);
    }
    // Convenience overload: verify a streamed Flux<ByteBuffer> payload.
    public Mono<Boolean> checkMatch(Flux<ByteBuffer> data, Context span) {
        return checkMatch(ContentInfo.fromFluxByteBuffer(data), span);
    }
    /**
     * Compares the given content's CRC against the recorded checksum.
     * Emits {@code true} on match; on mismatch logs the details (within the given
     * span) and emits {@code false}.
     */
    public Mono<Boolean> checkMatch(Mono<ContentInfo> contentInfo, Context span) {
        if (dataChecksum == -1) {
            return monoError(LOGGER, new IllegalStateException("setupBlob must complete first"));
        }
        return contentInfo
            .map(info -> {
                if (info.getCrc() != dataChecksum) {
                    logMismatch(info.getCrc(), info.getLength(), info.getHead(), span);
                    return false;
                }
                return true;
            });
    }
    // Logs expected vs. actual checksum/length plus a sample of the received
    // content, with the provided span made current so the log correlates to it.
    private void logMismatch(long actualCrc, long actualLength, byte[] actualContentHead, Context span) {
        try(AutoCloseable scope = TRACER.makeSpanCurrent(span)) {
            LOGGER.atError()
                .addKeyValue("expectedCrc", dataChecksum)
                .addKeyValue("actualCrc", actualCrc)
                .addKeyValue("expectedLength", blobSize)
                .addKeyValue("actualLength", actualLength)
                // NOTE(review): assumes actualContentHead holds at least
                // min(1024, actualLength) bytes; if it can be shorter, this String
                // constructor throws IndexOutOfBoundsException — confirm with callers.
                .addKeyValue("actualContentHead", new String(actualContentHead, 0, (int)Math.min(1024, actualLength), StandardCharsets.UTF_8))
                .log("mismatched crc");
        } catch (Throwable e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
}
/**
 * Holds the canonical ("original") blob content metadata — its CRC checksum and
 * size — so downloaded copies can be verified against what was uploaded.
 */
class OriginalContent {
    private final static ClientLogger LOGGER = new ClientLogger(OriginalContent.class);
    // Tracer is only used to re-activate the caller's span while logging a mismatch.
    private final static Tracer TRACER = TracerProvider.getDefaultProvider().createTracer("unused", null, null, null);
    // Fixed head of the generated blob content; the remainder is presumably produced
    // by CrcInputStream elsewhere — confirm against the stream implementation.
    private static final String BLOB_CONTENT_HEAD_STRING = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
        + "sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. "
        + "Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus. Massa sapien faucibus et molestie ac feugiat sed lectus. "
        + "Sed pulvinar proin gravida hendrerit.";
    private static final BinaryData BLOB_CONTENT_HEAD = BinaryData.fromString(BLOB_CONTENT_HEAD_STRING);
    // -1 doubles as the "setupBlob has not completed" sentinel.
    private long dataChecksum = -1;
    private long blobSize = 0;
    public OriginalContent() {
    }
    // Convenience overload: verify a BinaryData payload.
    public Mono<Boolean> checkMatch(BinaryData data, Context span) {
        return checkMatch(data.toFluxByteBuffer(), span);
    }
    // Convenience overload: verify a streamed Flux<ByteBuffer> payload.
    public Mono<Boolean> checkMatch(Flux<ByteBuffer> data, Context span) {
        return checkMatch(ContentInfo.fromFluxByteBuffer(data), span);
    }
    /**
     * Compares the given content's CRC against the recorded checksum.
     * Emits {@code true} on match; on mismatch logs the details (within the given
     * span) and emits {@code false}.
     */
    public Mono<Boolean> checkMatch(Mono<ContentInfo> contentInfo, Context span) {
        if (dataChecksum == -1) {
            return monoError(LOGGER, new IllegalStateException("setupBlob must complete first"));
        }
        return contentInfo
            .map(info -> {
                if (info.getCrc() != dataChecksum) {
                    logMismatch(info.getCrc(), info.getLength(), info.getHead(), span);
                    return false;
                }
                return true;
            });
    }
    // Logs expected vs. actual checksum/length plus a Base64 sample of the received
    // content, with the provided span made current so the log correlates to it.
    @SuppressWarnings("try")
    private void logMismatch(long actualCrc, long actualLength, ByteBuffer actualContentHead, Context span) {
        try(AutoCloseable scope = TRACER.makeSpanCurrent(span)) {
            // Base64.Encoder.encode(ByteBuffer) returns a ByteBuffer whose toString()
            // prints position/limit rather than the encoded content, so logging it
            // directly would lose the data. Copy via duplicate() (leaving the caller's
            // buffer position untouched) and encode to a String instead.
            byte[] headBytes = new byte[actualContentHead.remaining()];
            actualContentHead.duplicate().get(headBytes);
            LOGGER.atError()
                .addKeyValue("expectedCrc", dataChecksum)
                .addKeyValue("actualCrc", actualCrc)
                .addKeyValue("expectedLength", blobSize)
                .addKeyValue("actualLength", actualLength)
                .addKeyValue("actualContentHead", Base64.getEncoder().encodeToString(headBytes))
                .log("mismatched crc");
        } catch (Throwable e) {
            throw LOGGER.logExceptionAsError(new RuntimeException(e));
        }
    }
}
I'd recommend splitting this into multiple tests: if the first analysis fails we don't know whether the later ones would pass, and we may need to iterate many times before the whole test passes.
/**
 * Happy-path test: runs several single-feature analyses of the same URL image,
 * one visual feature per call, each exercising a different per-call option.
 * <p>
 * Intentionally a single test method (not one per feature) so it also covers the
 * common pattern of reusing one client instance across multiple service calls.
 */
public void testAnalyzeAsyncSingleFeatureFromUrl() throws MalformedURLException {
    createClientForStandardAnalysis(sync);
    // Anonymous-class trick to obtain this method's name for per-call labeling.
    String methodName = new Object(){}.getClass().getEnclosingMethod().getName();
    String imageSource = imageUrl;
    // DENSE_CAPTIONS with the gender-neutral-caption option enabled.
    List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.DENSE_CAPTIONS);
    ImageAnalysisOptions options = new ImageAnalysisOptionsBuilder().setGenderNeutralCaption(true).build();
    doAnalysis(methodName + ":DenseCaptions", sync, imageSource, visualFeatures, options);
    // SMART_CROPS with explicit aspect ratios.
    visualFeatures = Arrays.asList(VisualFeatures.SMART_CROPS);
    options = new ImageAnalysisOptionsBuilder().setSmartCropsAspectRatios(Arrays.asList(0.9, 1.33)).build();
    doAnalysis(methodName + ":SmartCrops", sync, imageSource, visualFeatures, options);
    // TAGS with an explicit language.
    visualFeatures = Arrays.asList(VisualFeatures.TAGS);
    options = new ImageAnalysisOptionsBuilder().setLanguage("en").build();
    doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options);
    // PEOPLE with no options at all (null options path).
    visualFeatures = Arrays.asList(VisualFeatures.PEOPLE);
    doAnalysis(methodName + ":People", sync, imageSource, visualFeatures, null);
}
doAnalysis(methodName + ":People", sync, imageSource, visualFeatures, null);
/**
 * Happy-path test: runs several single-feature analyses of the same URL image,
 * one visual feature per call, each exercising a different per-call option.
 * <p>
 * Intentionally a single test method (not one per feature) so it also covers the
 * common pattern of reusing one client instance across multiple service calls.
 */
public void testAnalyzeAsyncSingleFeatureFromUrl() throws MalformedURLException {
    createClientForStandardAnalysis(sync);
    // Anonymous-class trick to obtain this method's name for per-call labeling.
    String methodName = new Object(){}.getClass().getEnclosingMethod().getName();
    String imageSource = imageUrl;
    // DENSE_CAPTIONS with the gender-neutral-caption option enabled.
    List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.DENSE_CAPTIONS);
    ImageAnalysisOptions options = new ImageAnalysisOptions().setGenderNeutralCaption(true);
    doAnalysis(methodName + ":DenseCaptions", sync, imageSource, visualFeatures, options);
    // SMART_CROPS with explicit aspect ratios.
    visualFeatures = Arrays.asList(VisualFeatures.SMART_CROPS);
    options = new ImageAnalysisOptions().setSmartCropsAspectRatios(Arrays.asList(0.9, 1.33));
    doAnalysis(methodName + ":SmartCrops", sync, imageSource, visualFeatures, options);
    // TAGS with an explicit language.
    visualFeatures = Arrays.asList(VisualFeatures.TAGS);
    options = new ImageAnalysisOptions().setLanguage("en");
    doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options);
    // PEOPLE with no options at all (null options path).
    visualFeatures = Arrays.asList(VisualFeatures.PEOPLE);
    doAnalysis(methodName + ":People", sync, imageSource, visualFeatures, null);
}
class ImageAnalysisAsyncClientTest extends ImageAnalysisClientTestBase { private final Boolean sync = false; /*********************************************************************************** * * HAPPY PATH TESTS * ***********************************************************************************/ @Test public void testAnalyzeAsyncAllFeaturesFromFile() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageFile; List<VisualFeatures> visualFeatures = Arrays.asList( VisualFeatures.SMART_CROPS, VisualFeatures.CAPTION, VisualFeatures.DENSE_CAPTIONS, VisualFeatures.OBJECTS, VisualFeatures.PEOPLE, VisualFeatures.READ, VisualFeatures.TAGS); ImageAnalysisOptions options = null; doAnalysis(methodName, sync, imageSource, visualFeatures, options); } @Test /*********************************************************************************** * * ERROR TESTS * ***********************************************************************************/ @Test public void testAnalyzeAsyncAuthenticationFailure() throws MalformedURLException { createClientForAuthenticationFailure(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.TAGS); ImageAnalysisOptions options = null; doAnalysisWithError(methodName, sync, imageSource, visualFeatures, options, 401, "Access denied"); } }
class ImageAnalysisAsyncClientTest extends ImageAnalysisClientTestBase { private final Boolean sync = false; /*********************************************************************************** * * HAPPY PATH TESTS * ***********************************************************************************/ @Test public void testAnalyzeAsyncAllFeaturesFromFile() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageFile; List<VisualFeatures> visualFeatures = Arrays.asList( VisualFeatures.SMART_CROPS, VisualFeatures.CAPTION, VisualFeatures.DENSE_CAPTIONS, VisualFeatures.OBJECTS, VisualFeatures.PEOPLE, VisualFeatures.READ, VisualFeatures.TAGS); ImageAnalysisOptions options = null; doAnalysis(methodName, sync, imageSource, visualFeatures, options); } @Test /*********************************************************************************** * * ERROR TESTS * ***********************************************************************************/ @Test public void testAnalyzeAsyncAuthenticationFailure() throws MalformedURLException { createClientForAuthenticationFailure(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.TAGS); ImageAnalysisOptions options = null; doAnalysisWithError(methodName, sync, imageSource, visualFeatures, options, 401, "Access denied"); } }
Same comment as above: consider splitting this into multiple tests.
/**
 * Happy-path test: runs several single-feature analyses of a local image file
 * through a client configured with custom query parameters.
 * <p>
 * Intentionally a single test method (not one per feature) so it also covers the
 * common pattern of reusing one client instance across multiple service calls.
 */
public void testAnalyzeSyncSingleFeatureFromFile() throws MalformedURLException {
    // Custom query parameters; presumably verified downstream to be attached to
    // every request — confirm in createClientForStandardAnalysis/doAnalysis.
    List<Entry<String, String>> queryParams = new ArrayList<>();
    queryParams.add(new SimpleEntry<>("key1", "value1"));
    queryParams.add(new SimpleEntry<>("key2", "value2"));
    createClientForStandardAnalysis(sync, queryParams);
    // Anonymous-class trick to obtain this method's name for per-call labeling.
    String methodName = new Object(){}.getClass().getEnclosingMethod().getName();
    String imageSource = imageFile;
    ImageAnalysisOptions options = null;
    // Each call analyzes one feature with default (null) options.
    List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.CAPTION);
    doAnalysis(methodName + ":Caption", sync, imageSource, visualFeatures, options);
    visualFeatures = Arrays.asList(VisualFeatures.READ);
    doAnalysis(methodName + ":Read", sync, imageSource, visualFeatures, options);
    visualFeatures = Arrays.asList(VisualFeatures.TAGS);
    doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options);
}
doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options);
/**
 * Happy-path test: runs several single-feature analyses of a local image file
 * through a client configured with custom query parameters.
 * <p>
 * Intentionally a single test method (not one per feature) so it also covers the
 * common pattern of reusing one client instance across multiple service calls.
 */
public void testAnalyzeSyncSingleFeatureFromFile() throws MalformedURLException {
    // Custom query parameters; presumably verified downstream to be attached to
    // every request — confirm in createClientForStandardAnalysis/doAnalysis.
    List<Entry<String, String>> queryParams = new ArrayList<>();
    queryParams.add(new SimpleEntry<>("key1", "value1"));
    queryParams.add(new SimpleEntry<>("key2", "value2"));
    createClientForStandardAnalysis(sync, queryParams);
    // Anonymous-class trick to obtain this method's name for per-call labeling.
    String methodName = new Object(){}.getClass().getEnclosingMethod().getName();
    String imageSource = imageFile;
    ImageAnalysisOptions options = null;
    // Each call analyzes one feature with default (null) options.
    List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.CAPTION);
    doAnalysis(methodName + ":Caption", sync, imageSource, visualFeatures, options);
    visualFeatures = Arrays.asList(VisualFeatures.READ);
    doAnalysis(methodName + ":Read", sync, imageSource, visualFeatures, options);
    visualFeatures = Arrays.asList(VisualFeatures.TAGS);
    doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options);
}
class ImageAnalysisClientTest extends ImageAnalysisClientTestBase { private final Boolean sync = true; /*********************************************************************************** * * HAPPY PATH TESTS * ***********************************************************************************/ @Test public void testAnalyzeSyncAllFeaturesFromUrl() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList( VisualFeatures.SMART_CROPS, VisualFeatures.CAPTION, VisualFeatures.DENSE_CAPTIONS, VisualFeatures.OBJECTS, VisualFeatures.PEOPLE, VisualFeatures.READ, VisualFeatures.TAGS); Boolean genderNeutralCaption = true; String language = "en"; List<Double> aspectRatios = Arrays.asList(0.9, 1.33); String modelVersion = "latest"; ImageAnalysisOptions options = new ImageAnalysisOptionsBuilder() .setLanguage(language) .setGenderNeutralCaption(genderNeutralCaption) .setSmartCropsAspectRatios(aspectRatios) .setModelVersion(modelVersion) .build(); doAnalysis(methodName, sync, imageSource, visualFeatures, options); } @Test /*********************************************************************************** * * ERROR TESTS * ***********************************************************************************/ @Test public void testAnalyzeSyncImageUrlDoesNotExist() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = "https: List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.CAPTION); ImageAnalysisOptions options = null; doAnalysisWithError(methodName, sync, imageSource, visualFeatures, options, 400, "image url is not accessible"); } }
class ImageAnalysisClientTest extends ImageAnalysisClientTestBase { private final Boolean sync = true; /*********************************************************************************** * * HAPPY PATH TESTS * ***********************************************************************************/ @Test public void testAnalyzeSyncAllFeaturesFromUrl() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList( VisualFeatures.SMART_CROPS, VisualFeatures.CAPTION, VisualFeatures.DENSE_CAPTIONS, VisualFeatures.OBJECTS, VisualFeatures.PEOPLE, VisualFeatures.READ, VisualFeatures.TAGS); Boolean genderNeutralCaption = true; String language = "en"; List<Double> aspectRatios = Arrays.asList(0.9, 1.33); String modelVersion = "latest"; ImageAnalysisOptions options = new ImageAnalysisOptions( language, genderNeutralCaption, aspectRatios, modelVersion); doAnalysis(methodName, sync, imageSource, visualFeatures, options); } @Test /*********************************************************************************** * * ERROR TESTS * ***********************************************************************************/ @Test public void testAnalyzeSyncImageUrlDoesNotExist() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = "https: List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.CAPTION); ImageAnalysisOptions options = null; doAnalysisWithError(methodName, sync, imageSource, visualFeatures, options, 400, "image url is not accessible"); } }
@alzimmermsft thank you for this comment. Some tests do a single Image Analysis call, other tests (like the ones you pointed out) do multiple Image Analysis calls using the same ImageAnalysisClient. It's important that we have a test or two that do multiple calls using the same client, as this will be the common use case (creating a single client and using it to analyze, for example, a folder full of images). So, I'd rather keep this test as-is if that's okay with you. Reach out to me over IM if you want to discuss this further.
public void testAnalyzeAsyncSingleFeatureFromUrl() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.DENSE_CAPTIONS); ImageAnalysisOptions options = new ImageAnalysisOptionsBuilder().setGenderNeutralCaption(true).build(); doAnalysis(methodName + ":DenseCaptions", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.SMART_CROPS); options = new ImageAnalysisOptionsBuilder().setSmartCropsAspectRatios(Arrays.asList(0.9, 1.33)).build(); doAnalysis(methodName + ":SmartCrops", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.TAGS); options = new ImageAnalysisOptionsBuilder().setLanguage("en").build(); doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.PEOPLE); doAnalysis(methodName + ":People", sync, imageSource, visualFeatures, null); }
doAnalysis(methodName + ":People", sync, imageSource, visualFeatures, null);
public void testAnalyzeAsyncSingleFeatureFromUrl() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.DENSE_CAPTIONS); ImageAnalysisOptions options = new ImageAnalysisOptions().setGenderNeutralCaption(true); doAnalysis(methodName + ":DenseCaptions", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.SMART_CROPS); options = new ImageAnalysisOptions().setSmartCropsAspectRatios(Arrays.asList(0.9, 1.33)); doAnalysis(methodName + ":SmartCrops", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.TAGS); options = new ImageAnalysisOptions().setLanguage("en"); doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.PEOPLE); doAnalysis(methodName + ":People", sync, imageSource, visualFeatures, null); }
class ImageAnalysisAsyncClientTest extends ImageAnalysisClientTestBase { private final Boolean sync = false; /*********************************************************************************** * * HAPPY PATH TESTS * ***********************************************************************************/ @Test public void testAnalyzeAsyncAllFeaturesFromFile() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageFile; List<VisualFeatures> visualFeatures = Arrays.asList( VisualFeatures.SMART_CROPS, VisualFeatures.CAPTION, VisualFeatures.DENSE_CAPTIONS, VisualFeatures.OBJECTS, VisualFeatures.PEOPLE, VisualFeatures.READ, VisualFeatures.TAGS); ImageAnalysisOptions options = null; doAnalysis(methodName, sync, imageSource, visualFeatures, options); } @Test /*********************************************************************************** * * ERROR TESTS * ***********************************************************************************/ @Test public void testAnalyzeAsyncAuthenticationFailure() throws MalformedURLException { createClientForAuthenticationFailure(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.TAGS); ImageAnalysisOptions options = null; doAnalysisWithError(methodName, sync, imageSource, visualFeatures, options, 401, "Access denied"); } }
class ImageAnalysisAsyncClientTest extends ImageAnalysisClientTestBase { private final Boolean sync = false; /*********************************************************************************** * * HAPPY PATH TESTS * ***********************************************************************************/ @Test public void testAnalyzeAsyncAllFeaturesFromFile() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageFile; List<VisualFeatures> visualFeatures = Arrays.asList( VisualFeatures.SMART_CROPS, VisualFeatures.CAPTION, VisualFeatures.DENSE_CAPTIONS, VisualFeatures.OBJECTS, VisualFeatures.PEOPLE, VisualFeatures.READ, VisualFeatures.TAGS); ImageAnalysisOptions options = null; doAnalysis(methodName, sync, imageSource, visualFeatures, options); } @Test /*********************************************************************************** * * ERROR TESTS * ***********************************************************************************/ @Test public void testAnalyzeAsyncAuthenticationFailure() throws MalformedURLException { createClientForAuthenticationFailure(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.TAGS); ImageAnalysisOptions options = null; doAnalysisWithError(methodName, sync, imageSource, visualFeatures, options, 401, "Access denied"); } }
Thanks! see my comment in ImageAnalysisAsyncClientTest.java
public void testAnalyzeSyncSingleFeatureFromFile() throws MalformedURLException { List<Entry<String, String>> queryParams = new ArrayList<>(); queryParams.add(new SimpleEntry<>("key1", "value1")); queryParams.add(new SimpleEntry<>("key2", "value2")); createClientForStandardAnalysis(sync, queryParams); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageFile; ImageAnalysisOptions options = null; List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.CAPTION); doAnalysis(methodName + ":Caption", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.READ); doAnalysis(methodName + ":Read", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.TAGS); doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options); }
doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options);
public void testAnalyzeSyncSingleFeatureFromFile() throws MalformedURLException { List<Entry<String, String>> queryParams = new ArrayList<>(); queryParams.add(new SimpleEntry<>("key1", "value1")); queryParams.add(new SimpleEntry<>("key2", "value2")); createClientForStandardAnalysis(sync, queryParams); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageFile; ImageAnalysisOptions options = null; List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.CAPTION); doAnalysis(methodName + ":Caption", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.READ); doAnalysis(methodName + ":Read", sync, imageSource, visualFeatures, options); visualFeatures = Arrays.asList(VisualFeatures.TAGS); doAnalysis(methodName + ":Tags", sync, imageSource, visualFeatures, options); }
class ImageAnalysisClientTest extends ImageAnalysisClientTestBase { private final Boolean sync = true; /*********************************************************************************** * * HAPPY PATH TESTS * ***********************************************************************************/ @Test public void testAnalyzeSyncAllFeaturesFromUrl() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList( VisualFeatures.SMART_CROPS, VisualFeatures.CAPTION, VisualFeatures.DENSE_CAPTIONS, VisualFeatures.OBJECTS, VisualFeatures.PEOPLE, VisualFeatures.READ, VisualFeatures.TAGS); Boolean genderNeutralCaption = true; String language = "en"; List<Double> aspectRatios = Arrays.asList(0.9, 1.33); String modelVersion = "latest"; ImageAnalysisOptions options = new ImageAnalysisOptionsBuilder() .setLanguage(language) .setGenderNeutralCaption(genderNeutralCaption) .setSmartCropsAspectRatios(aspectRatios) .setModelVersion(modelVersion) .build(); doAnalysis(methodName, sync, imageSource, visualFeatures, options); } @Test /*********************************************************************************** * * ERROR TESTS * ***********************************************************************************/ @Test public void testAnalyzeSyncImageUrlDoesNotExist() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = "https: List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.CAPTION); ImageAnalysisOptions options = null; doAnalysisWithError(methodName, sync, imageSource, visualFeatures, options, 400, "image url is not accessible"); } }
class ImageAnalysisClientTest extends ImageAnalysisClientTestBase { private final Boolean sync = true; /*********************************************************************************** * * HAPPY PATH TESTS * ***********************************************************************************/ @Test public void testAnalyzeSyncAllFeaturesFromUrl() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = imageUrl; List<VisualFeatures> visualFeatures = Arrays.asList( VisualFeatures.SMART_CROPS, VisualFeatures.CAPTION, VisualFeatures.DENSE_CAPTIONS, VisualFeatures.OBJECTS, VisualFeatures.PEOPLE, VisualFeatures.READ, VisualFeatures.TAGS); Boolean genderNeutralCaption = true; String language = "en"; List<Double> aspectRatios = Arrays.asList(0.9, 1.33); String modelVersion = "latest"; ImageAnalysisOptions options = new ImageAnalysisOptions( language, genderNeutralCaption, aspectRatios, modelVersion); doAnalysis(methodName, sync, imageSource, visualFeatures, options); } @Test /*********************************************************************************** * * ERROR TESTS * ***********************************************************************************/ @Test public void testAnalyzeSyncImageUrlDoesNotExist() throws MalformedURLException { createClientForStandardAnalysis(sync); String methodName = new Object(){}.getClass().getEnclosingMethod().getName(); String imageSource = "https: List<VisualFeatures> visualFeatures = Arrays.asList(VisualFeatures.CAPTION); ImageAnalysisOptions options = null; doAnalysisWithError(methodName, sync, imageSource, visualFeatures, options, 400, "image url is not accessible"); } }
do we need any checks here?
public int getSize() { return end - start; }
return end - start;
public int getSize() { return end - start; }
class RangeReplaceSubstitution extends Substitution implements Comparable<RangeReplaceSubstitution> { private final int start; private final int end; /** * Create a new Substitution. * * @param urlParameterName The name that is used between curly quotes as a placeholder in the target URL. * @param methodParameterIndex The index of the parameter in the original interface method where the value for the * placeholder is. * @param shouldEncode Whether the value from the method's argument should be encoded when the substitution is * taking place. * @param start The starting index of the range replacement substitution. * @param end The ending index of the range replacement substitution. */ private RangeReplaceSubstitution(String urlParameterName, int methodParameterIndex, boolean shouldEncode, int start, int end) { super(urlParameterName, methodParameterIndex, shouldEncode); this.start = start; this.end = end; } /** * Gets the range replacement substitutions for the provided urlParameterName. * * @param urlParameterName The name that is used between curly quotes as a placeholder in the target URL. * @param methodParameterIndex The index of the parameter in the original interface method where the value for the * placeholder is. * @param shouldEncode Whether the value from the method's argument should be encoded when the substitution is * taking place. * @param substitutionBase The base string that will be used to find the range replacement substitutions. * @return The range replacement substitutions for the provided urlParameterName. 
*/ public static List<RangeReplaceSubstitution> getRangeReplaceSubstitutions(String urlParameterName, int methodParameterIndex, boolean shouldEncode, String substitutionBase) { List<RangeReplaceSubstitution> substitutions = new ArrayList<>(); String placeholder = "{" + urlParameterName + "}"; int indexOf = 0; while (true) { indexOf = substitutionBase.indexOf(placeholder, indexOf); if (indexOf == -1) { break; } substitutions.add(new RangeReplaceSubstitution(urlParameterName, methodParameterIndex, shouldEncode, indexOf, indexOf + placeholder.length())); indexOf = indexOf + placeholder.length(); } return substitutions; } /** * Gets the starting index of the range replacement substitution. * * @return The starting index of the range replacement substitution. */ public int getStart() { return start; } /** * Gets the ending index of the range replacement substitution. * * @return The ending index of the range replacement substitution. */ public int getEnd() { return end; } /** * Gets the size of the range replacement substitution. * * @return The size of the range replacement substitution. */ @Override public int compareTo(RangeReplaceSubstitution o) { if (start < o.start) { return -1; } else if (start > o.start) { return 1; } else { return Integer.compare(end, o.end); } } @Override public int hashCode() { return Objects.hash(start, end); } @Override public boolean equals(Object obj) { if (!(obj instanceof RangeReplaceSubstitution)) { return false; } RangeReplaceSubstitution other = (RangeReplaceSubstitution) obj; return start == other.start && end == other.end; } }
class RangeReplaceSubstitution extends Substitution implements Comparable<RangeReplaceSubstitution> { private final int start; private final int end; /** * Create a new Substitution. * * @param urlParameterName The name that is used between curly quotes as a placeholder in the target URL. * @param methodParameterIndex The index of the parameter in the original interface method where the value for the * placeholder is. * @param shouldEncode Whether the value from the method's argument should be encoded when the substitution is * taking place. * @param start The starting index of the range replacement substitution. * @param end The ending index of the range replacement substitution. */ private RangeReplaceSubstitution(String urlParameterName, int methodParameterIndex, boolean shouldEncode, int start, int end) { super(urlParameterName, methodParameterIndex, shouldEncode); this.start = start; this.end = end; } /** * Gets the range replacement substitutions for the provided urlParameterName. * * @param urlParameterName The name that is used between curly quotes as a placeholder in the target URL. * @param methodParameterIndex The index of the parameter in the original interface method where the value for the * placeholder is. * @param shouldEncode Whether the value from the method's argument should be encoded when the substitution is * taking place. * @param substitutionBase The base string that will be used to find the range replacement substitutions. * @return The range replacement substitutions for the provided urlParameterName. 
*/ public static List<RangeReplaceSubstitution> getRangeReplaceSubstitutions(String urlParameterName, int methodParameterIndex, boolean shouldEncode, String substitutionBase) { List<RangeReplaceSubstitution> substitutions = new ArrayList<>(); String placeholder = "{" + urlParameterName + "}"; int indexOf = 0; while (true) { indexOf = substitutionBase.indexOf(placeholder, indexOf); if (indexOf == -1) { break; } substitutions.add(new RangeReplaceSubstitution(urlParameterName, methodParameterIndex, shouldEncode, indexOf, indexOf + placeholder.length())); indexOf = indexOf + placeholder.length(); } return substitutions; } /** * Gets the starting index of the range replacement substitution. * * @return The starting index of the range replacement substitution. */ public int getStart() { return start; } /** * Gets the ending index of the range replacement substitution. * * @return The ending index of the range replacement substitution. */ public int getEnd() { return end; } /** * Gets the size of the range replacement substitution. * * @return The size of the range replacement substitution. */ @Override public int compareTo(RangeReplaceSubstitution o) { if (start < o.start) { return -1; } else if (start > o.start) { return 1; } else { return Integer.compare(end, o.end); } } @Override public int hashCode() { return Objects.hash(start, end); } @Override public boolean equals(Object obj) { if (!(obj instanceof RangeReplaceSubstitution)) { return false; } RangeReplaceSubstitution other = (RangeReplaceSubstitution) obj; return start == other.start && end == other.end; } }
This should be fine as `start` is always the index where the replacement is found and `end` is `start` + replacement length, which will always be greater than 0.
public int getSize() { return end - start; }
return end - start;
public int getSize() { return end - start; }
class RangeReplaceSubstitution extends Substitution implements Comparable<RangeReplaceSubstitution> { private final int start; private final int end; /** * Create a new Substitution. * * @param urlParameterName The name that is used between curly quotes as a placeholder in the target URL. * @param methodParameterIndex The index of the parameter in the original interface method where the value for the * placeholder is. * @param shouldEncode Whether the value from the method's argument should be encoded when the substitution is * taking place. * @param start The starting index of the range replacement substitution. * @param end The ending index of the range replacement substitution. */ private RangeReplaceSubstitution(String urlParameterName, int methodParameterIndex, boolean shouldEncode, int start, int end) { super(urlParameterName, methodParameterIndex, shouldEncode); this.start = start; this.end = end; } /** * Gets the range replacement substitutions for the provided urlParameterName. * * @param urlParameterName The name that is used between curly quotes as a placeholder in the target URL. * @param methodParameterIndex The index of the parameter in the original interface method where the value for the * placeholder is. * @param shouldEncode Whether the value from the method's argument should be encoded when the substitution is * taking place. * @param substitutionBase The base string that will be used to find the range replacement substitutions. * @return The range replacement substitutions for the provided urlParameterName. 
*/ public static List<RangeReplaceSubstitution> getRangeReplaceSubstitutions(String urlParameterName, int methodParameterIndex, boolean shouldEncode, String substitutionBase) { List<RangeReplaceSubstitution> substitutions = new ArrayList<>(); String placeholder = "{" + urlParameterName + "}"; int indexOf = 0; while (true) { indexOf = substitutionBase.indexOf(placeholder, indexOf); if (indexOf == -1) { break; } substitutions.add(new RangeReplaceSubstitution(urlParameterName, methodParameterIndex, shouldEncode, indexOf, indexOf + placeholder.length())); indexOf = indexOf + placeholder.length(); } return substitutions; } /** * Gets the starting index of the range replacement substitution. * * @return The starting index of the range replacement substitution. */ public int getStart() { return start; } /** * Gets the ending index of the range replacement substitution. * * @return The ending index of the range replacement substitution. */ public int getEnd() { return end; } /** * Gets the size of the range replacement substitution. * * @return The size of the range replacement substitution. */ @Override public int compareTo(RangeReplaceSubstitution o) { if (start < o.start) { return -1; } else if (start > o.start) { return 1; } else { return Integer.compare(end, o.end); } } @Override public int hashCode() { return Objects.hash(start, end); } @Override public boolean equals(Object obj) { if (!(obj instanceof RangeReplaceSubstitution)) { return false; } RangeReplaceSubstitution other = (RangeReplaceSubstitution) obj; return start == other.start && end == other.end; } }
class RangeReplaceSubstitution extends Substitution implements Comparable<RangeReplaceSubstitution> { private final int start; private final int end; /** * Create a new Substitution. * * @param urlParameterName The name that is used between curly quotes as a placeholder in the target URL. * @param methodParameterIndex The index of the parameter in the original interface method where the value for the * placeholder is. * @param shouldEncode Whether the value from the method's argument should be encoded when the substitution is * taking place. * @param start The starting index of the range replacement substitution. * @param end The ending index of the range replacement substitution. */ private RangeReplaceSubstitution(String urlParameterName, int methodParameterIndex, boolean shouldEncode, int start, int end) { super(urlParameterName, methodParameterIndex, shouldEncode); this.start = start; this.end = end; } /** * Gets the range replacement substitutions for the provided urlParameterName. * * @param urlParameterName The name that is used between curly quotes as a placeholder in the target URL. * @param methodParameterIndex The index of the parameter in the original interface method where the value for the * placeholder is. * @param shouldEncode Whether the value from the method's argument should be encoded when the substitution is * taking place. * @param substitutionBase The base string that will be used to find the range replacement substitutions. * @return The range replacement substitutions for the provided urlParameterName. 
*/ public static List<RangeReplaceSubstitution> getRangeReplaceSubstitutions(String urlParameterName, int methodParameterIndex, boolean shouldEncode, String substitutionBase) { List<RangeReplaceSubstitution> substitutions = new ArrayList<>(); String placeholder = "{" + urlParameterName + "}"; int indexOf = 0; while (true) { indexOf = substitutionBase.indexOf(placeholder, indexOf); if (indexOf == -1) { break; } substitutions.add(new RangeReplaceSubstitution(urlParameterName, methodParameterIndex, shouldEncode, indexOf, indexOf + placeholder.length())); indexOf = indexOf + placeholder.length(); } return substitutions; } /** * Gets the starting index of the range replacement substitution. * * @return The starting index of the range replacement substitution. */ public int getStart() { return start; } /** * Gets the ending index of the range replacement substitution. * * @return The ending index of the range replacement substitution. */ public int getEnd() { return end; } /** * Gets the size of the range replacement substitution. * * @return The size of the range replacement substitution. */ @Override public int compareTo(RangeReplaceSubstitution o) { if (start < o.start) { return -1; } else if (start > o.start) { return 1; } else { return Integer.compare(end, o.end); } } @Override public int hashCode() { return Objects.hash(start, end); } @Override public boolean equals(Object obj) { if (!(obj instanceof RangeReplaceSubstitution)) { return false; } RangeReplaceSubstitution other = (RangeReplaceSubstitution) obj; return start == other.start && end == other.end; } }
As discussed offline - force refresh of PKRange/FeedRange is not a good idea every 13 seconds (or any cx configurable interval). Let's make this a random interval of at least 30 minutes or so.
public Mono<Void> refresh(List<Lease> leases) { if (leases != null && !leases.isEmpty()) { this.leaseTokens.set(leases.stream().map(lease -> (FeedRangeEpkImpl)lease.getFeedRange()).collect(Collectors.toList())); } return this.documentClient.getOverlappingRanges(PartitionKeyInternalHelper.FullRange, true) .onErrorResume(throwable -> { logger.warn("Refresh pkRanges failed", throwable); return Mono.empty(); }) .then(); }
return this.documentClient.getOverlappingRanges(PartitionKeyInternalHelper.FullRange, true)
public Mono<Void> refresh(List<Lease> leases) { if (leases != null && !leases.isEmpty()) { this.leaseTokens.set(leases.stream().map(lease -> (FeedRangeEpkImpl)lease.getFeedRange()).collect(Collectors.toList())); } return this.documentClient.getOverlappingRanges(PartitionKeyInternalHelper.FullRange, false) .doOnNext(pkRanges -> { if (!pkRanges.isEmpty()) { for (PartitionKeyRange pkRange : pkRangeToFeedRangeMap.keySet()) { if (!pkRanges.contains(pkRange)) { List<FeedRange> feedRanges = pkRangeToFeedRangeMap.remove(pkRange); logger.debug("PkRange {} does not exist any more, remove it from map. ", pkRange.getId()); for (FeedRange feedRange : feedRanges) { this.feedRangeToThroughputControlGroupConfigMap.remove(feedRange); } } } } }) .onErrorResume(throwable -> { logger.warn("Refresh pkRanges failed", throwable); return Mono.empty(); }) .then(); }
class FeedRangeThroughputControlConfigManager { private static final Logger logger = LoggerFactory.getLogger(FeedRangeThroughputControlConfigManager.class); private final ThroughputControlGroupConfig throughputControlGroupConfig; private final ChangeFeedContextClient documentClient; private final AtomicReference<List<FeedRangeEpkImpl>> leaseTokens; public FeedRangeThroughputControlConfigManager( ThroughputControlGroupConfig throughputControlGroupConfig, ChangeFeedContextClient documentClient) { checkNotNull(throughputControlGroupConfig, "Argument 'throughputControlGroupConfig' can not be null"); checkNotNull(documentClient, "Argument 'documentClient' can not be null"); this.throughputControlGroupConfig = throughputControlGroupConfig; this.documentClient = documentClient; this.leaseTokens = new AtomicReference<>(); } /** * This method will be called during the leases load balancing time from {@link com.azure.cosmos.implementation.changefeed.common.EqualPartitionsBalancingStrategy}. * We are going to track the up to date all leases and refresh the partitionKeyRangesCache * * @param leases all the current leases in lease container. * @return a representation of the deferred computation of this call. */ public Mono<ThroughputControlGroupConfig> getThroughputControlConfigForFeedRange(FeedRangeEpkImpl feedRange) { checkNotNull(feedRange, "Argument 'feedRange' can not be null"); return this.documentClient.getOverlappingRanges(feedRange.getRange(), false) .flatMap(partitionKeyRanges -> { if (partitionKeyRanges.isEmpty()) { return Mono.error(new IllegalStateException("Failed to get overlapping partition key range for range " + feedRange)); } if (partitionKeyRanges.size() > 1) { return Mono.error(new IllegalStateException("There are more than one partition key ranges mapped to the lease feed range. 
This should never happen")); } long leasesBelongToSamePartitionKeyRange = this.leaseTokens .get() .stream() .filter(leaseToken -> leaseToken.getRange().getMin().compareTo(partitionKeyRanges.get(0).getMinInclusive()) >= 0 && leaseToken.getRange().getMax().compareTo(partitionKeyRanges.get(0).getMaxExclusive()) <= 0) .count(); return Mono.just( getThroughputControlGroupConfigInternal( feedRange, leasesBelongToSamePartitionKeyRange)); }) .onErrorResume(throwable -> { logger.warn("getThroughputControlConfigForLeaseFeedRange failed, using divide factor 1", throwable); return Mono.just( getThroughputControlGroupConfigInternal(feedRange, 1)); }); } private ThroughputControlGroupConfig getThroughputControlGroupConfigInternal( FeedRange feedRange, long perPartitionDivideFactor) { ThroughputControlGroupConfigBuilder throughputControlGroupConfigForFeedRangeBuilder = new ThroughputControlGroupConfigBuilder() .groupName(this.throughputControlGroupConfig.getGroupName() + "-" + feedRange.toString()) .continueOnInitError(this.throughputControlGroupConfig.continueOnInitError()); if (this.throughputControlGroupConfig.getTargetThroughput() != null) { throughputControlGroupConfigForFeedRangeBuilder.targetThroughput( (int) Math.max( this.throughputControlGroupConfig.getTargetThroughput() / perPartitionDivideFactor, 1 )); } if (this.throughputControlGroupConfig.getTargetThroughputThreshold() != null) { throughputControlGroupConfigForFeedRangeBuilder.targetThroughputThreshold( this.throughputControlGroupConfig.getTargetThroughputThreshold() / perPartitionDivideFactor); } if (this.throughputControlGroupConfig.getPriorityLevel() != null) { throughputControlGroupConfigForFeedRangeBuilder.priorityLevel(this.throughputControlGroupConfig.getPriorityLevel()); } ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange = throughputControlGroupConfigForFeedRangeBuilder.build(); 
this.documentClient.getContainerClient().enableLocalThroughputControlGroup(throughputControlGroupConfigForFeedRange); return throughputControlGroupConfigForFeedRange; } }
// Maintains one local throughput control group config per lease feed range for the
// ChangeFeedProcessor, dividing the configured target throughput across the leases
// that map onto the same physical partition (partition key range).
// The two maps are ConcurrentHashMaps and leaseTokens is an AtomicReference, so
// lookups/updates from multiple CFP worker threads do not need external locking.
class FeedRangeThroughputControlConfigManager {
    private static final Logger logger = LoggerFactory.getLogger(FeedRangeThroughputControlConfigManager.class);

    // User-provided container-level config that is scaled down per feed range.
    private final ThroughputControlGroupConfig throughputControlGroupConfig;
    private final ChangeFeedContextClient documentClient;
    // Latest snapshot of all lease feed ranges, refreshed during lease load balancing.
    private final AtomicReference<List<FeedRangeEpkImpl>> leaseTokens;
    // pkRange -> lease feed ranges overlapping it; used to evict configs when a
    // pkRange disappears (e.g. after split/merge).
    private final Map<PartitionKeyRange, List<FeedRange>> pkRangeToFeedRangeMap;
    // Cache of the per-feed-range throughput control group configs.
    private final Map<FeedRange, ThroughputControlGroupConfig> feedRangeToThroughputControlGroupConfigMap;

    public FeedRangeThroughputControlConfigManager(
        ThroughputControlGroupConfig throughputControlGroupConfig,
        ChangeFeedContextClient documentClient) {

        checkNotNull(throughputControlGroupConfig, "Argument 'throughputControlGroupConfig' can not be null");
        checkNotNull(documentClient, "Argument 'documentClient' can not be null");

        this.throughputControlGroupConfig = throughputControlGroupConfig;
        this.documentClient = documentClient;
        this.leaseTokens = new AtomicReference<>();
        this.pkRangeToFeedRangeMap = new ConcurrentHashMap<>();
        this.feedRangeToThroughputControlGroupConfigMap = new ConcurrentHashMap<>();
    }

    /**
     * Returns the cached throughput control group config for the given lease feed range,
     * creating and caching it on first use.
     *
     * @param feedRange the lease feed range.
     * @return a mono emitting the throughput control group config for the feed range.
     */
    public Mono<ThroughputControlGroupConfig> getOrCreateThroughputControlConfigForFeedRange(FeedRangeEpkImpl feedRange) {
        checkNotNull(feedRange, "Argument 'feedRange' can not be null");

        ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange = this.feedRangeToThroughputControlGroupConfigMap.get(feedRange);
        if (throughputControlGroupConfigForFeedRange != null) {
            return Mono.just(throughputControlGroupConfigForFeedRange);
        }

        return this.createThroughputControlConfigForFeedRange(feedRange);
    }

    /**
     * Builds the config for the feed range, dividing the target throughput by the
     * number of leases that overlap the same partition key range. Falls back to a
     * divide factor of 1 on any failure.
     *
     * @param feedRange the lease feed range.
     * @return a mono emitting the (cached) throughput control group config.
     */
    public Mono<ThroughputControlGroupConfig> createThroughputControlConfigForFeedRange(FeedRangeEpkImpl feedRange) {
        checkNotNull(feedRange, "Argument 'feedRange' can not be null");

        return this.documentClient.getOverlappingRanges(feedRange.getRange(), false)
            .flatMap(partitionKeyRanges -> {
                if (partitionKeyRanges.isEmpty()) {
                    return Mono.error(new IllegalStateException("Failed to get overlapping partition key range for range " + feedRange));
                }
                if (partitionKeyRanges.size() > 1) {
                    // A lease feed range is always a sub-range of exactly one pkRange.
                    return Mono.error(new IllegalStateException("There are more than one partition key ranges mapped to the lease feed range. This should never happen"));
                }

                // Remember which lease feed ranges belong to this pkRange so refresh()
                // can evict their configs once the pkRange goes away.
                this.pkRangeToFeedRangeMap.compute(partitionKeyRanges.get(0), (key, feedRangeList) -> {
                    if (feedRangeList == null) {
                        feedRangeList = new ArrayList<>();
                    }
                    feedRangeList.add(feedRange);
                    return feedRangeList;
                });

                // Count the leases inside the same pkRange; the target throughput is
                // divided evenly among them.
                // NOTE(review): leaseTokens.get() is null until refresh() has run; the
                // resulting NPE is routed to onErrorResume below (divide factor 1) —
                // confirm this fallback is intended.
                long leasesBelongToSamePartitionKeyRange = this.leaseTokens
                    .get()
                    .stream()
                    .filter(leaseToken -> leaseToken.getRange().getMin().compareTo(partitionKeyRanges.get(0).getMinInclusive()) >= 0
                        && leaseToken.getRange().getMax().compareTo(partitionKeyRanges.get(0).getMaxExclusive()) <= 0)
                    .count();

                ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange =
                    this.getThroughputControlGroupConfigInternal(feedRange, leasesBelongToSamePartitionKeyRange);

                // compute() ignores any existing value, so the newest config wins.
                return Mono.just(
                    this.feedRangeToThroughputControlGroupConfigMap.compute(feedRange, (key, config) -> throughputControlGroupConfigForFeedRange)
                );
            })
            .onErrorResume(throwable -> {
                // Best effort: fall back to an un-divided config instead of failing.
                logger.warn("getThroughputControlConfigForLeaseFeedRange failed, using divide factor 1", throwable);
                return Mono.just(
                    getThroughputControlGroupConfigInternal(feedRange, 1));
            });
    }

    // Builds and locally registers a throughput control group config for the feed
    // range, with targetThroughput divided by perPartitionDivideFactor (floor of 1).
    private ThroughputControlGroupConfig getThroughputControlGroupConfigInternal(
        FeedRange feedRange,
        long perPartitionDivideFactor) {

        ThroughputControlGroupConfigBuilder throughputControlGroupConfigForFeedRangeBuilder =
            new ThroughputControlGroupConfigBuilder()
                .groupName(this.throughputControlGroupConfig.getGroupName() + "-" + feedRange.toString())
                .continueOnInitError(this.throughputControlGroupConfig.continueOnInitError());

        if (this.throughputControlGroupConfig.getTargetThroughput() != null) {
            throughputControlGroupConfigForFeedRangeBuilder.targetThroughput(
                (int) Math.max(
                    this.throughputControlGroupConfig.getTargetThroughput() / perPartitionDivideFactor,
                    1
                ));
        }

        if (this.throughputControlGroupConfig.getTargetThroughputThreshold() != null) {
            throughputControlGroupConfigForFeedRangeBuilder.targetThroughputThreshold(
                this.throughputControlGroupConfig.getTargetThroughputThreshold() / perPartitionDivideFactor);
        }

        if (this.throughputControlGroupConfig.getPriorityLevel() != null) {
            throughputControlGroupConfigForFeedRangeBuilder.priorityLevel(this.throughputControlGroupConfig.getPriorityLevel());
        }

        ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange = throughputControlGroupConfigForFeedRangeBuilder.build();
        // Register the group locally so requests carrying this group name are throttled.
        this.documentClient.getContainerClient().enableLocalThroughputControlGroup(throughputControlGroupConfigForFeedRange);

        return throughputControlGroupConfigForFeedRange;
    }
}
Same for this. No need for this to be in flatMap.
/**
 * Main processing loop for this partition/lease: polls the change feed one page at a
 * time, dispatches non-empty batches to the observer, advances the continuation, and
 * classifies failures into {@code resultException} for the supervisor. Runs until
 * cancellation is requested or a non-retriable error is recorded.
 */
public Mono<Void> run(CancellationToken cancellationToken) {
    logger.info("Partition {}: processing task started with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner());
    this.hasMoreResults = true;
    this.checkpointer.setCancellationToken(cancellationToken);
    // Resolved once per run; null means throughput control is not configured.
    ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange = this.tryGetThroughputControlConfigForFeedRange(this.lease);

    return Flux.just(this)
        .flatMap(value -> {
            if (cancellationToken.isCancellationRequested()) {
                return Flux.empty();
            }

            // Keep draining while the feed has more results and no error is recorded.
            if(this.hasMoreResults && this.resultException == null) {
                return Flux.just(value);
            }

            // Feed is drained: wait out the poll delay in 100ms slices so that
            // cancellation is observed promptly.
            Instant stopTimer = Instant.now().plus(this.settings.getFeedPollDelay());
            return Mono.just(value)
                .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                .repeat(() -> {
                    Instant currentTime = Instant.now();
                    return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                }).last();
        })
        .flatMap(value -> {
            if (throughputControlGroupConfigForFeedRange != null) {
                this.options.setThroughputControlGroupName(throughputControlGroupConfigForFeedRange.getGroupName());
            }

            // Fetch exactly one change feed page per loop iteration.
            return this.documentClient.createDocumentChangeFeedQuery(
                this.settings.getCollectionSelfLink(),
                this.options,
                JsonNode.class).limitRequest(1);
        })
        .flatMap(documentFeedResponse -> {
            if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException());

            final String continuationToken = documentFeedResponse.getContinuationToken();
            final ChangeFeedState continuationState = ChangeFeedState.fromString(continuationToken);
            checkNotNull(continuationState, "Argument 'continuationState' must not be null.");
            checkArgument(
                continuationState
                    .getContinuation()
                    .getContinuationTokenCount() == 1,
                "For ChangeFeedProcessor the continuation state should always have one range/continuation");

            // Remember the latest server continuation so error paths can resume from it.
            this.lastServerContinuationToken = continuationState
                .getContinuation()
                .getCurrentContinuationToken()
                .getToken();
            this.hasMoreResults = !ModelBridgeInternal.noChanges(documentFeedResponse);

            if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) {
                logger.info("Partition {}: processing {} feeds with owner {}.", this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner());
                return this.dispatchChanges(documentFeedResponse, continuationState)
                    .doOnError(throwable -> logger.debug(
                        "Exception was thrown from thread {}",
                        Thread.currentThread().getId(),
                        throwable))
                    .doOnSuccess((Void) -> {
                        // Advance the continuation only after the observer succeeded.
                        this.options = CosmosChangeFeedRequestOptions
                            .createForProcessingFromContinuation(continuationToken);

                        if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException();
                    });
            }

            // Empty page: still advance the continuation.
            this.options = CosmosChangeFeedRequestOptions
                .createForProcessingFromContinuation(continuationToken);

            if (cancellationToken.isCancellationRequested()) {
                return Flux.error(new TaskCancelledException());
            }

            return Flux.empty();
        })
        .doOnComplete(() -> {
            // Restore the configured page size after a temporary reduction
            // (see the MAX_ITEM_COUNT_TOO_LARGE branch below).
            if (this.options.getMaxItemCount() != this.settings.getMaxItemCount()) {
                this.options.setMaxItemCount(this.settings.getMaxItemCount());
            }
        })
        .onErrorResume(throwable -> {
            if (throwable instanceof CosmosException) {
                CosmosException clientException = (CosmosException) throwable;
                logger.warn("CosmosException: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), clientException);
                StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException);
                switch (docDbError) {
                    case PARTITION_NOT_FOUND: {
                        this.resultException = new PartitionNotFoundException("Partition not found.", this.lastServerContinuationToken);
                    }
                    break;
                    case PARTITION_SPLIT_OR_MERGE: {
                        this.resultException = new FeedRangeGoneException("Partition split.", this.lastServerContinuationToken);
                    }
                    break;
                    case UNDEFINED: {
                        this.resultException = new RuntimeException(clientException);
                    }
                    break;
                    case MAX_ITEM_COUNT_TOO_LARGE: {
                        // NOTE(review): when maxItemCount is already <= 1 this records the
                        // error but still halves the value (to 0) and keeps looping via
                        // Flux.empty() — confirm this is intended.
                        if (this.options.getMaxItemCount() <= 1) {
                            logger.error("Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException);
                            this.resultException = new RuntimeException(clientException);
                        }

                        this.options.setMaxItemCount(this.options.getMaxItemCount() / 2);
                        logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount());
                        return Flux.empty();
                    }
                    case TRANSIENT_ERROR: {
                        // Honor the server's retry-after by sleeping in 100ms slices,
                        // then swallow the error so the outer repeat() resubscribes.
                        if (clientException.getRetryAfterDuration().toMillis() > 0) {
                            Instant stopTimer = Instant.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS);
                            return Mono.just(clientException.getRetryAfterDuration().toMillis())
                                .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                                .repeat(() -> {
                                    Instant currentTime = Instant.now();
                                    return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                                }).flatMap(values -> Flux.empty());
                        }
                    }
                    break;
                    default: {
                        logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException);
                        this.resultException = new RuntimeException(clientException);
                    }
                }
            } else if (throwable instanceof LeaseLostException) {
                logger.info("LeaseLoseException with Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner());
                this.resultException = (LeaseLostException) throwable;
            } else if (throwable instanceof TaskCancelledException) {
                logger.debug("Task cancelled exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable);
                this.resultException = (TaskCancelledException) throwable;
            } else {
                logger.warn("Unexpected exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable);
                this.resultException = new RuntimeException(throwable);
            }
            // Re-raise so the outer onErrorResume below records it and ends the loop.
            return Flux.error(throwable);
        })
        .repeat(() -> {
            if (cancellationToken.isCancellationRequested()) {
                this.resultException = new TaskCancelledException();
                return false;
            }

            return true;
        })
        .onErrorResume(throwable -> {
            if (this.resultException == null) {
                this.resultException = new RuntimeException(throwable);
            }

            // Swallow: callers inspect getResultException() instead.
            return Flux.empty();
        })
        .then()
        .doFinally(any -> {
            logger.info("Partition {}: processing task exited with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner());
        });
}
if (throughputControlGroupConfigForFeedRange != null) {
/**
 * Main processing loop for this partition/lease: polls the change feed one page at a
 * time, dispatches non-empty batches to the observer, advances the continuation, and
 * classifies failures into {@code resultException} for the supervisor. Runs until
 * cancellation is requested or a non-retriable error is recorded.
 */
public Mono<Void> run(CancellationToken cancellationToken) {
    logger.info("Partition {}: processing task started with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner());
    this.hasMoreResults = true;
    this.checkpointer.setCancellationToken(cancellationToken);
    // Resolved once per run; null means throughput control is not configured.
    ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange = this.tryGetThroughputControlConfigForFeedRange(this.lease);

    return Flux.just(this)
        .flatMap(value -> {
            if (cancellationToken.isCancellationRequested()) {
                return Flux.empty();
            }

            // Keep draining while the feed has more results and no error is recorded.
            if(this.hasMoreResults && this.resultException == null) {
                return Flux.just(value);
            }

            // Feed is drained: wait out the poll delay in 100ms slices so that
            // cancellation is observed promptly.
            Instant stopTimer = Instant.now().plus(this.settings.getFeedPollDelay());
            return Mono.just(value)
                .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                .repeat(() -> {
                    Instant currentTime = Instant.now();
                    return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                }).last();
        })
        .flatMap(value -> {
            if (throughputControlGroupConfigForFeedRange != null) {
                this.options.setThroughputControlGroupName(throughputControlGroupConfigForFeedRange.getGroupName());
            }

            // Fetch exactly one change feed page per loop iteration.
            return this.documentClient.createDocumentChangeFeedQuery(
                this.settings.getCollectionSelfLink(),
                this.options,
                JsonNode.class).limitRequest(1);
        })
        .flatMap(documentFeedResponse -> {
            if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException());

            final String continuationToken = documentFeedResponse.getContinuationToken();
            final ChangeFeedState continuationState = ChangeFeedState.fromString(continuationToken);
            checkNotNull(continuationState, "Argument 'continuationState' must not be null.");
            checkArgument(
                continuationState
                    .getContinuation()
                    .getContinuationTokenCount() == 1,
                "For ChangeFeedProcessor the continuation state should always have one range/continuation");

            // Remember the latest server continuation so error paths can resume from it.
            this.lastServerContinuationToken = continuationState
                .getContinuation()
                .getCurrentContinuationToken()
                .getToken();
            this.hasMoreResults = !ModelBridgeInternal.noChanges(documentFeedResponse);

            if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) {
                logger.info("Partition {}: processing {} feeds with owner {}.", this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner());
                return this.dispatchChanges(documentFeedResponse, continuationState)
                    .doOnError(throwable -> logger.debug(
                        "Exception was thrown from thread {}",
                        Thread.currentThread().getId(),
                        throwable))
                    .doOnSuccess((Void) -> {
                        // Advance the continuation only after the observer succeeded.
                        this.options = CosmosChangeFeedRequestOptions
                            .createForProcessingFromContinuation(continuationToken);

                        if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException();
                    });
            }

            // Empty page: still advance the continuation.
            this.options = CosmosChangeFeedRequestOptions
                .createForProcessingFromContinuation(continuationToken);

            if (cancellationToken.isCancellationRequested()) {
                return Flux.error(new TaskCancelledException());
            }

            return Flux.empty();
        })
        .doOnComplete(() -> {
            // Restore the configured page size after a temporary reduction
            // (see the MAX_ITEM_COUNT_TOO_LARGE branch below).
            if (this.options.getMaxItemCount() != this.settings.getMaxItemCount()) {
                this.options.setMaxItemCount(this.settings.getMaxItemCount());
            }
        })
        .onErrorResume(throwable -> {
            if (throwable instanceof CosmosException) {
                CosmosException clientException = (CosmosException) throwable;
                logger.warn("CosmosException: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), clientException);
                StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException);
                switch (docDbError) {
                    case PARTITION_NOT_FOUND: {
                        this.resultException = new PartitionNotFoundException("Partition not found.", this.lastServerContinuationToken);
                    }
                    break;
                    case PARTITION_SPLIT_OR_MERGE: {
                        this.resultException = new FeedRangeGoneException("Partition split.", this.lastServerContinuationToken);
                    }
                    break;
                    case UNDEFINED: {
                        this.resultException = new RuntimeException(clientException);
                    }
                    break;
                    case MAX_ITEM_COUNT_TOO_LARGE: {
                        // NOTE(review): when maxItemCount is already <= 1 this records the
                        // error but still halves the value (to 0) and keeps looping via
                        // Flux.empty() — confirm this is intended.
                        if (this.options.getMaxItemCount() <= 1) {
                            logger.error("Cannot reduce maxItemCount further as it's already at {}", this.options.getMaxItemCount(), clientException);
                            this.resultException = new RuntimeException(clientException);
                        }

                        this.options.setMaxItemCount(this.options.getMaxItemCount() / 2);
                        logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount());
                        return Flux.empty();
                    }
                    case TRANSIENT_ERROR: {
                        // Honor the server's retry-after by sleeping in 100ms slices,
                        // then swallow the error so the outer repeat() resubscribes.
                        if (clientException.getRetryAfterDuration().toMillis() > 0) {
                            Instant stopTimer = Instant.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS);
                            return Mono.just(clientException.getRetryAfterDuration().toMillis())
                                .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                                .repeat(() -> {
                                    Instant currentTime = Instant.now();
                                    return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                                }).flatMap(values -> Flux.empty());
                        }
                    }
                    break;
                    default: {
                        logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException);
                        this.resultException = new RuntimeException(clientException);
                    }
                }
            } else if (throwable instanceof LeaseLostException) {
                logger.info("LeaseLoseException with Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner());
                this.resultException = (LeaseLostException) throwable;
            } else if (throwable instanceof TaskCancelledException) {
                logger.debug("Task cancelled exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable);
                this.resultException = (TaskCancelledException) throwable;
            } else {
                logger.warn("Unexpected exception: Partition {} from thread {} with owner {}", this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable);
                this.resultException = new RuntimeException(throwable);
            }
            // Re-raise so the outer onErrorResume below records it and ends the loop.
            return Flux.error(throwable);
        })
        .repeat(() -> {
            if (cancellationToken.isCancellationRequested()) {
                this.resultException = new TaskCancelledException();
                return false;
            }

            return true;
        })
        .onErrorResume(throwable -> {
            if (this.resultException == null) {
                this.resultException = new RuntimeException(throwable);
            }

            // Swallow: callers inspect getResultException() instead.
            return Flux.empty();
        })
        .then()
        .doFinally(any -> {
            logger.info("Partition {}: processing task exited with owner {}.", this.lease.getLeaseToken(), this.lease.getOwner());
        });
}
class PartitionProcessorImpl implements PartitionProcessor { private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class); private final ProcessorSettings settings; private final PartitionCheckpointer checkpointer; private final ChangeFeedObserver<JsonNode> observer; private volatile CosmosChangeFeedRequestOptions options; private final ChangeFeedContextClient documentClient; private final Lease lease; private volatile RuntimeException resultException; private volatile String lastServerContinuationToken; private volatile boolean hasMoreResults; private final FeedRangeThroughputControlConfigManager feedRangeThroughputControlConfigManager; public PartitionProcessorImpl(ChangeFeedObserver<JsonNode> observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkPointer, Lease lease, FeedRangeThroughputControlConfigManager feedRangeThroughputControlConfigManager) { this.observer = observer; this.documentClient = documentClient; this.settings = settings; this.checkpointer = checkPointer; this.lease = lease; ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); this.options.setMaxItemCount(settings.getMaxItemCount()); ImplementationBridgeHelpers.CosmosChangeFeedRequestOptionsHelper.getCosmosChangeFeedRequestOptionsAccessor() .setHeader( this.options, HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, String.valueOf(HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES_NONE)); this.feedRangeThroughputControlConfigManager = feedRangeThroughputControlConfigManager; } @Override private FeedRangePartitionKeyRangeImpl getPkRangeFeedRangeFromStartState() { final FeedRangeInternal feedRange = this.settings.getStartState().getFeedRange(); checkNotNull(feedRange, "FeedRange must not be null here."); checkArgument( feedRange instanceof FeedRangePartitionKeyRangeImpl, "FeedRange must be a PkRangeId FeedRange when 
using Lease V1 contract."); return (FeedRangePartitionKeyRangeImpl)feedRange; } private ThroughputControlGroupConfig tryGetThroughputControlConfigForFeedRange(Lease lease) { if (this.feedRangeThroughputControlConfigManager == null) { return null; } return this.feedRangeThroughputControlConfigManager.getThroughputControlConfigForFeedRange(lease.getFeedRange()); } @Override public RuntimeException getResultException() { return this.resultException; } private Mono<Void> dispatchChanges( FeedResponse<JsonNode> response, ChangeFeedState continuationState) { ChangeFeedObserverContext<JsonNode> context = new ChangeFeedObserverContextImpl<>( this.getPkRangeFeedRangeFromStartState().getPartitionKeyRangeId(), response, continuationState, this.checkpointer); return this.observer.processChanges(context, response.getResults()); } }
class PartitionProcessorImpl implements PartitionProcessor { private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class); private final ProcessorSettings settings; private final PartitionCheckpointer checkpointer; private final ChangeFeedObserver<JsonNode> observer; private volatile CosmosChangeFeedRequestOptions options; private final ChangeFeedContextClient documentClient; private final Lease lease; private volatile RuntimeException resultException; private volatile String lastServerContinuationToken; private volatile boolean hasMoreResults; private final FeedRangeThroughputControlConfigManager feedRangeThroughputControlConfigManager; public PartitionProcessorImpl(ChangeFeedObserver<JsonNode> observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkPointer, Lease lease, FeedRangeThroughputControlConfigManager feedRangeThroughputControlConfigManager) { this.observer = observer; this.documentClient = documentClient; this.settings = settings; this.checkpointer = checkPointer; this.lease = lease; ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); this.options.setMaxItemCount(settings.getMaxItemCount()); ImplementationBridgeHelpers.CosmosChangeFeedRequestOptionsHelper.getCosmosChangeFeedRequestOptionsAccessor() .setHeader( this.options, HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, String.valueOf(HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES_NONE)); this.feedRangeThroughputControlConfigManager = feedRangeThroughputControlConfigManager; } @Override private FeedRangePartitionKeyRangeImpl getPkRangeFeedRangeFromStartState() { final FeedRangeInternal feedRange = this.settings.getStartState().getFeedRange(); checkNotNull(feedRange, "FeedRange must not be null here."); checkArgument( feedRange instanceof FeedRangePartitionKeyRangeImpl, "FeedRange must be a PkRangeId FeedRange when 
using Lease V1 contract."); return (FeedRangePartitionKeyRangeImpl)feedRange; } private ThroughputControlGroupConfig tryGetThroughputControlConfigForFeedRange(Lease lease) { if (this.feedRangeThroughputControlConfigManager == null) { return null; } return this.feedRangeThroughputControlConfigManager.getThroughputControlConfigForFeedRange(lease.getFeedRange()); } @Override public RuntimeException getResultException() { return this.resultException; } private Mono<Void> dispatchChanges( FeedResponse<JsonNode> response, ChangeFeedState continuationState) { ChangeFeedObserverContext<JsonNode> context = new ChangeFeedObserverContextImpl<>( this.getPkRangeFeedRangeFromStartState().getPartitionKeyRangeId(), response, continuationState, this.checkpointer); return this.observer.processChanges(context, response.getResults()); } }
You are right Fabian, I do not need to forceRefresh here. If a lease is assigned to a CFP instance with a stale cache, the ReadFeed requests will fail, which will eventually trigger a forceRefresh. The logic has been changed as follows: in the `FeedRangeThroughputControlConfigManager`, maintain the following two maps: pkRangesToFeedRange and feedRangeToThroughputControlGroupConfig. When refresh() is called — which usually happens during the lease load-balancing stage — it checks whether each pkRange still exists in the current cache. If not, we know some change has happened for that pkRange: its entry is removed from the map, and then all of the mapped feedRanges' throughputControlGroupConfigs are removed from the feedRangeToThroughputControlGroupConfig map. When a feedRange's config is missing, a re-creation of the config is triggered so the partitionDivideFactor is re-calculated.
/**
 * Refreshes the manager's view of the world during lease load balancing: snapshots
 * the current lease feed ranges and force-refreshes the full-range partition key
 * range cache. Cache refresh failures are logged and swallowed (best effort).
 *
 * @param leases all the current leases in the lease container.
 * @return a representation of the deferred computation of this call.
 */
public Mono<Void> refresh(List<Lease> leases) {
    boolean hasLeases = leases != null && !leases.isEmpty();
    if (hasLeases) {
        // Snapshot the lease feed ranges so divide factors can be derived later.
        List<FeedRangeEpkImpl> epkTokens =
            leases
                .stream()
                .map(currentLease -> (FeedRangeEpkImpl) currentLease.getFeedRange())
                .collect(Collectors.toList());
        this.leaseTokens.set(epkTokens);
    }

    Mono<Void> refreshPkRangesCache =
        this.documentClient
            .getOverlappingRanges(PartitionKeyInternalHelper.FullRange, true)
            .onErrorResume(error -> {
                logger.warn("Refresh pkRanges failed", error);
                return Mono.empty();
            })
            .then();

    return refreshPkRangesCache;
}
return this.documentClient.getOverlappingRanges(PartitionKeyInternalHelper.FullRange, true)
/**
 * Refreshes the lease feed-range snapshot and evicts throughput control configs whose
 * backing partition key range no longer exists in the cache (e.g. after a
 * split/merge), so the per-partition divide factor is re-computed on next access.
 *
 * @param leases all the current leases in the lease container.
 * @return a representation of the deferred computation of this call.
 */
public Mono<Void> refresh(List<Lease> leases) {
    if (leases != null && !leases.isEmpty()) {
        this.leaseTokens.set(leases.stream().map(lease -> (FeedRangeEpkImpl)lease.getFeedRange()).collect(Collectors.toList()));
    }

    return this.documentClient.getOverlappingRanges(PartitionKeyInternalHelper.FullRange, false)
        .doOnNext(pkRanges -> {
            if (!pkRanges.isEmpty()) {
                for (PartitionKeyRange pkRange : pkRangeToFeedRangeMap.keySet()) {
                    if (!pkRanges.contains(pkRange)) {
                        List<FeedRange> feedRanges = pkRangeToFeedRangeMap.remove(pkRange);
                        logger.debug("PkRange {} does not exist any more, remove it from map. ", pkRange.getId());

                        // FIX: remove() returns null when another thread evicted the
                        // entry between the keySet iteration and this call; guard
                        // against the resulting NPE in the enhanced for loop.
                        if (feedRanges != null) {
                            for (FeedRange feedRange : feedRanges) {
                                this.feedRangeToThroughputControlGroupConfigMap.remove(feedRange);
                            }
                        }
                    }
                }
            }
        })
        .onErrorResume(throwable -> {
            // Best effort: a failed refresh only delays eviction; log and continue.
            logger.warn("Refresh pkRanges failed", throwable);
            return Mono.empty();
        })
        .then();
}
// Computes a per-lease-feed-range throughput control group config by dividing the
// user-configured target throughput across the leases that share the same physical
// partition (partition key range).
class FeedRangeThroughputControlConfigManager {
    private static final Logger logger = LoggerFactory.getLogger(FeedRangeThroughputControlConfigManager.class);

    // User-provided container-level config that is scaled down per feed range.
    private final ThroughputControlGroupConfig throughputControlGroupConfig;
    private final ChangeFeedContextClient documentClient;
    // Latest snapshot of all lease feed ranges; updated during lease load balancing.
    private final AtomicReference<List<FeedRangeEpkImpl>> leaseTokens;

    public FeedRangeThroughputControlConfigManager(
        ThroughputControlGroupConfig throughputControlGroupConfig,
        ChangeFeedContextClient documentClient) {

        checkNotNull(throughputControlGroupConfig, "Argument 'throughputControlGroupConfig' can not be null");
        checkNotNull(documentClient, "Argument 'documentClient' can not be null");

        this.throughputControlGroupConfig = throughputControlGroupConfig;
        this.documentClient = documentClient;
        this.leaseTokens = new AtomicReference<>();
    }

    /**
     * Builds the throughput control group config for the given lease feed range,
     * dividing the configured target throughput by the number of leases that overlap
     * the same partition key range. Falls back to a divide factor of 1 on any failure.
     *
     * @param feedRange the lease feed range.
     * @return a mono emitting the throughput control group config for the feed range.
     */
    public Mono<ThroughputControlGroupConfig> getThroughputControlConfigForFeedRange(FeedRangeEpkImpl feedRange) {
        checkNotNull(feedRange, "Argument 'feedRange' can not be null");

        return this.documentClient.getOverlappingRanges(feedRange.getRange(), false)
            .flatMap(partitionKeyRanges -> {
                if (partitionKeyRanges.isEmpty()) {
                    return Mono.error(new IllegalStateException("Failed to get overlapping partition key range for range " + feedRange));
                }
                if (partitionKeyRanges.size() > 1) {
                    // A lease feed range is always a sub-range of exactly one pkRange.
                    return Mono.error(new IllegalStateException("There are more than one partition key ranges mapped to the lease feed range. This should never happen"));
                }

                // Count the leases inside the same pkRange; the target throughput is
                // divided evenly among them.
                // NOTE(review): leaseTokens.get() is null until refresh() has run; the
                // resulting NPE is routed to onErrorResume below (divide factor 1) —
                // confirm this fallback is intended.
                long leasesBelongToSamePartitionKeyRange = this.leaseTokens
                    .get()
                    .stream()
                    .filter(leaseToken -> leaseToken.getRange().getMin().compareTo(partitionKeyRanges.get(0).getMinInclusive()) >= 0
                        && leaseToken.getRange().getMax().compareTo(partitionKeyRanges.get(0).getMaxExclusive()) <= 0)
                    .count();

                return Mono.just(
                    getThroughputControlGroupConfigInternal(
                        feedRange,
                        leasesBelongToSamePartitionKeyRange));
            })
            .onErrorResume(throwable -> {
                // Best effort: fall back to an un-divided config instead of failing.
                logger.warn("getThroughputControlConfigForLeaseFeedRange failed, using divide factor 1", throwable);
                return Mono.just(
                    getThroughputControlGroupConfigInternal(feedRange, 1));
            });
    }

    // Builds and locally registers a throughput control group config for the feed
    // range, with targetThroughput divided by perPartitionDivideFactor (floor of 1).
    private ThroughputControlGroupConfig getThroughputControlGroupConfigInternal(
        FeedRange feedRange,
        long perPartitionDivideFactor) {

        ThroughputControlGroupConfigBuilder throughputControlGroupConfigForFeedRangeBuilder =
            new ThroughputControlGroupConfigBuilder()
                .groupName(this.throughputControlGroupConfig.getGroupName() + "-" + feedRange.toString())
                .continueOnInitError(this.throughputControlGroupConfig.continueOnInitError());

        if (this.throughputControlGroupConfig.getTargetThroughput() != null) {
            throughputControlGroupConfigForFeedRangeBuilder.targetThroughput(
                (int) Math.max(
                    this.throughputControlGroupConfig.getTargetThroughput() / perPartitionDivideFactor,
                    1
                ));
        }

        if (this.throughputControlGroupConfig.getTargetThroughputThreshold() != null) {
            throughputControlGroupConfigForFeedRangeBuilder.targetThroughputThreshold(
                this.throughputControlGroupConfig.getTargetThroughputThreshold() / perPartitionDivideFactor);
        }

        if (this.throughputControlGroupConfig.getPriorityLevel() != null) {
            throughputControlGroupConfigForFeedRangeBuilder.priorityLevel(this.throughputControlGroupConfig.getPriorityLevel());
        }

        ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange = throughputControlGroupConfigForFeedRangeBuilder.build();
        // Register the group locally so requests carrying this group name are throttled.
        this.documentClient.getContainerClient().enableLocalThroughputControlGroup(throughputControlGroupConfigForFeedRange);

        return throughputControlGroupConfigForFeedRange;
    }
}
/**
 * Derives a per-feed-range throughput control group config from a single user-supplied
 * template config, dividing the configured throughput by the number of leases that map
 * to the same physical partition key range.
 */
class FeedRangeThroughputControlConfigManager {
    private static final Logger logger = LoggerFactory.getLogger(FeedRangeThroughputControlConfigManager.class);
    // Template config supplied by the user; per-feed-range configs are derived from it.
    private final ThroughputControlGroupConfig throughputControlGroupConfig;
    private final ChangeFeedContextClient documentClient;
    // Latest snapshot of all lease tokens, refreshed during lease load balancing.
    // NOTE(review): holds null until first populated elsewhere; createThroughputControlConfigForFeedRange
    // dereferences it with .get() — a null snapshot would NPE there (tolerated via onErrorResume). Confirm.
    private final AtomicReference<List<FeedRangeEpkImpl>> leaseTokens;
    // Tracks which lease feed ranges were observed for each partition key range.
    private final Map<PartitionKeyRange, List<FeedRange>> pkRangeToFeedRangeMap;
    // Cache of already-created per-feed-range configs.
    private final Map<FeedRange, ThroughputControlGroupConfig> feedRangeToThroughputControlGroupConfigMap;

    public FeedRangeThroughputControlConfigManager(
        ThroughputControlGroupConfig throughputControlGroupConfig,
        ChangeFeedContextClient documentClient) {

        checkNotNull(throughputControlGroupConfig, "Argument 'throughputControlGroupConfig' can not be null");
        checkNotNull(documentClient, "Argument 'documentClient' can not be null");

        this.throughputControlGroupConfig = throughputControlGroupConfig;
        this.documentClient = documentClient;
        this.leaseTokens = new AtomicReference<>();
        this.pkRangeToFeedRangeMap = new ConcurrentHashMap<>();
        this.feedRangeToThroughputControlGroupConfigMap = new ConcurrentHashMap<>();
    }

    /**
     * Returns the cached throughput control group config for the given lease feed range,
     * creating (and caching) one if it does not exist yet.
     *
     * @param feedRange the EPK feed range of the lease; must not be null.
     * @return a Mono emitting the config for this feed range.
     */
    public Mono<ThroughputControlGroupConfig> getOrCreateThroughputControlConfigForFeedRange(FeedRangeEpkImpl feedRange) {
        checkNotNull(feedRange, "Argument 'feedRange' can not be null");

        ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange =
            this.feedRangeToThroughputControlGroupConfigMap.get(feedRange);
        if (throughputControlGroupConfigForFeedRange != null) {
            return Mono.just(throughputControlGroupConfigForFeedRange);
        }

        return this.createThroughputControlConfigForFeedRange(feedRange);
    }

    /**
     * Creates a throughput control group config for the given lease feed range. The divide
     * factor is the number of current leases that fall within the same physical partition
     * key range; on any failure the method falls back to a divide factor of 1.
     *
     * @param feedRange the EPK feed range of the lease; must not be null.
     * @return a Mono emitting the newly created (and cached) config.
     */
    public Mono<ThroughputControlGroupConfig> createThroughputControlConfigForFeedRange(FeedRangeEpkImpl feedRange) {
        checkNotNull(feedRange, "Argument 'feedRange' can not be null");

        return this.documentClient.getOverlappingRanges(feedRange.getRange(), false)
            .flatMap(partitionKeyRanges -> {
                if (partitionKeyRanges.isEmpty()) {
                    return Mono.error(new IllegalStateException("Failed to get overlapping partition key range for range " + feedRange));
                }
                // A lease feed range is always a subset of exactly one physical partition.
                if (partitionKeyRanges.size() > 1) {
                    return Mono.error(new IllegalStateException("There are more than one partition key ranges mapped to the lease feed range. This should never happen"));
                }

                // Record that this feed range maps to the resolved partition key range.
                this.pkRangeToFeedRangeMap.compute(partitionKeyRanges.get(0), (key, feedRangeList) -> {
                    if (feedRangeList == null) {
                        feedRangeList = new ArrayList<>();
                    }
                    feedRangeList.add(feedRange);
                    return feedRangeList;
                });

                // Count sibling leases inside the same physical partition; this becomes
                // the divide factor so throughput is split evenly among them.
                long leasesBelongToSamePartitionKeyRange =
                    this.leaseTokens
                        .get()
                        .stream()
                        .filter(leaseToken ->
                            leaseToken.getRange().getMin().compareTo(partitionKeyRanges.get(0).getMinInclusive()) >= 0
                                && leaseToken.getRange().getMax().compareTo(partitionKeyRanges.get(0).getMaxExclusive()) <= 0)
                        .count();

                ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange =
                    this.getThroughputControlGroupConfigInternal(feedRange, leasesBelongToSamePartitionKeyRange);

                // compute() both caches and returns the fresh config atomically.
                return Mono.just(
                    this.feedRangeToThroughputControlGroupConfigMap.compute(feedRange, (key, config) -> throughputControlGroupConfigForFeedRange)
                );
            })
            .onErrorResume(throwable -> {
                // Best-effort fallback: use the whole configured throughput (divide factor 1).
                logger.warn("getThroughputControlConfigForLeaseFeedRange failed, using divide factor 1", throwable);
                return Mono.just(
                    getThroughputControlGroupConfigInternal(feedRange, 1));
            });
    }

    /**
     * Builds, registers and returns the per-feed-range config: group name is suffixed with
     * the feed range, and target throughput / threshold are divided by {@code perPartitionDivideFactor}.
     */
    private ThroughputControlGroupConfig getThroughputControlGroupConfigInternal(
        FeedRange feedRange,
        long perPartitionDivideFactor) {

        ThroughputControlGroupConfigBuilder throughputControlGroupConfigForFeedRangeBuilder =
            new ThroughputControlGroupConfigBuilder()
                .groupName(this.throughputControlGroupConfig.getGroupName() + "-" + feedRange.toString())
                .continueOnInitError(this.throughputControlGroupConfig.continueOnInitError());

        if (this.throughputControlGroupConfig.getTargetThroughput() != null) {
            // Clamp to at least 1 RU so the derived group is never zero-throughput.
            throughputControlGroupConfigForFeedRangeBuilder.targetThroughput(
                (int) Math.max(
                    this.throughputControlGroupConfig.getTargetThroughput() / perPartitionDivideFactor,
                    1
                ));
        }

        if (this.throughputControlGroupConfig.getTargetThroughputThreshold() != null) {
            throughputControlGroupConfigForFeedRangeBuilder.targetThroughputThreshold(
                this.throughputControlGroupConfig.getTargetThroughputThreshold() / perPartitionDivideFactor);
        }

        if (this.throughputControlGroupConfig.getPriorityLevel() != null) {
            throughputControlGroupConfigForFeedRangeBuilder.priorityLevel(this.throughputControlGroupConfig.getPriorityLevel());
        }

        ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange =
            throughputControlGroupConfigForFeedRangeBuilder.build();
        // Register the derived group locally so requests tagged with its name are throttled.
        this.documentClient.getContainerClient().enableLocalThroughputControlGroup(throughputControlGroupConfigForFeedRange);

        return throughputControlGroupConfigForFeedRange;
    }
}
Can simplify this a bit ```suggestion long len = length.getAndAdd(bb.remaining()); if (len < head.length) { ByteBuffer dup = bb.duplicate(); dup.get(head, (int)len, Math.min(dup.remaining(), head.length - (int)len)); } crc.update(bb); ```
/**
 * Consumes the given flux of buffers, computing the total length, the CRC32 checksum,
 * and a snapshot of the first 1024 bytes of the content.
 *
 * @param data the content; buffers are read exactly once.
 * @return a Mono emitting the aggregated {@link ContentInfo} on completion.
 */
public static Mono<ContentInfo> fromFluxByteBuffer(Flux<ByteBuffer> data) {
    AtomicLong length = new AtomicLong(0);
    byte[] head = new byte[1024];
    Mono<Long> crcMono = data
        .reduce(new CRC32(), (crc, bb) -> {
            // getAndAdd returns the length *before* this buffer, i.e. the write
            // offset into 'head' for the bytes we are about to capture.
            long len = length.getAndAdd(bb.remaining());
            if (len < head.length) {
                // Work on a duplicate so the CRC update below still sees the full buffer.
                ByteBuffer dup = bb.duplicate();
                dup.get(head, (int) len, Math.min(dup.remaining(), head.length - (int) len));
            }
            crc.update(bb);
            return crc;
        })
        .map(CRC32::getValue);
    return crcMono.map(crc -> new ContentInfo(crc, length.get(), head));
}
crc.update(bb);
/**
 * Consumes the given flux of buffers, computing the total length, the CRC32 checksum,
 * and a snapshot of the first 1024 bytes of the content.
 *
 * @param data the content; buffers are read exactly once.
 * @return a Mono emitting the aggregated {@link ContentInfo} on completion.
 */
public static Mono<ContentInfo> fromFluxByteBuffer(Flux<ByteBuffer> data) {
    AtomicLong length = new AtomicLong(0);
    ByteBuffer head = ByteBuffer.allocate(1024);
    Mono<Long> crcMono = data
        .reduce(new CRC32(), (crc, bb) -> {
            length.getAndAdd(bb.remaining());
            if (head.hasRemaining()) {
                // Bulk-copy instead of a byte-at-a-time loop; cap the duplicate's
                // limit so the put can never overflow 'head'.
                ByteBuffer dup = bb.duplicate();
                if (dup.remaining() > head.remaining()) {
                    dup.limit(dup.position() + head.remaining());
                }
                head.put(dup);
            }
            crc.update(bb);
            return crc;
        })
        .map(CRC32::getValue);
    return crcMono.map(crc -> new ContentInfo(crc, length.get(), head));
}
/**
 * Immutable summary of streamed content: total byte length, CRC32 checksum,
 * and a snapshot of the leading bytes.
 */
class ContentInfo {
    private final long crc;
    private final long length;
    private final byte[] head;

    public ContentInfo(long crc, long length, byte[] head) {
        this.crc = crc;
        this.length = length;
        this.head = head;
    }

    /** Total number of bytes observed in the content. */
    public long getLength() {
        return length;
    }

    /** CRC32 checksum of the full content. */
    public long getCrc() {
        return crc;
    }

    /** Leading bytes of the content (shared reference, not a defensive copy). */
    public byte[] getHead() {
        return head;
    }
}
/**
 * Immutable summary of streamed content: total byte length, CRC32 checksum,
 * and a buffer holding the leading bytes.
 */
class ContentInfo {
    private final long crc;
    private final long length;
    private final ByteBuffer head;

    public ContentInfo(long crc, long length, ByteBuffer head) {
        this.crc = crc;
        this.length = length;
        this.head = head;
    }

    /** Total number of bytes observed in the content. */
    public long getLength() {
        return length;
    }

    /** CRC32 checksum of the full content. */
    public long getCrc() {
        return crc;
    }

    /** Buffer with the leading bytes of the content (shared reference, not a copy). */
    public ByteBuffer getHead() {
        return head;
    }
}
This one cannot be simplified that way — we need to set the throughput control group name on each request, because we reconstruct the request options after we get each response: https://github.com/Azure/azure-sdk-for-java/blob/1c2f550b23802f10c48280b1e019c41c88a9d73f/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/changefeed/epkversion/PartitionProcessorImpl.java#L143C23-L143C23
/**
 * Runs the change feed processing loop for this partition until cancellation or an
 * unrecoverable error. One iteration = poll the change feed once, dispatch any changes
 * to the observer, then either continue immediately (more results pending) or back off
 * for the configured feed poll delay. Errors are classified and recorded into
 * {@code resultException}; the loop itself is driven by the trailing repeat().
 */
public Mono<Void> run(CancellationToken cancellationToken) {
    logger.info("Partition {}: processing task started with owner {}.",
        this.lease.getLeaseToken(), this.lease.getOwner());
    this.hasMoreResults = true;
    this.checkpointer.setCancellationToken(cancellationToken);
    // Resolved once per run; applied to every request below so throughput control
    // survives the options being reconstructed after each response.
    ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange =
        this.tryGetThroughputControlConfigForFeedRange(this.lease);

    return Flux.just(this)
        .flatMap(value -> {
            if (cancellationToken.isCancellationRequested()) {
                return Flux.empty();
            }
            // More results and no recorded failure: proceed immediately.
            if (this.hasMoreResults && this.resultException == null) {
                return Flux.just(value);
            }
            // Otherwise poll in 100ms slices until the feed poll delay elapses
            // (or cancellation is requested), then emit once.
            Instant stopTimer = Instant.now().plus(this.settings.getFeedPollDelay());
            return Mono.just(value)
                .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                .repeat(() -> {
                    Instant currentTime = Instant.now();
                    return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                }).last();
        })
        .flatMap(value -> {
            if (throughputControlGroupConfigForFeedRange != null) {
                // Re-apply on every request: 'options' is rebuilt from the continuation
                // after each response, which drops the group name.
                this.options.setThroughputControlGroupName(throughputControlGroupConfigForFeedRange.getGroupName());
            }
            // limitRequest(1): exactly one page per loop iteration.
            return this.documentClient.createDocumentChangeFeedQuery(
                this.settings.getCollectionSelfLink(),
                this.options,
                JsonNode.class).limitRequest(1);
        })
        .flatMap(documentFeedResponse -> {
            if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException());

            final String continuationToken = documentFeedResponse.getContinuationToken();
            final ChangeFeedState continuationState = ChangeFeedState.fromString(continuationToken);
            checkNotNull(continuationState, "Argument 'continuationState' must not be null.");
            checkArgument(
                continuationState
                    .getContinuation()
                    .getContinuationTokenCount() == 1,
                "For ChangeFeedProcessor the continuation state should always have one range/continuation");

            // Remembered so error recovery (split/not-found) can resume from here.
            this.lastServerContinuationToken = continuationState
                .getContinuation()
                .getCurrentContinuationToken()
                .getToken();
            this.hasMoreResults = !ModelBridgeInternal.noChanges(documentFeedResponse);

            if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) {
                logger.info("Partition {}: processing {} feeds with owner {}.",
                    this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner());

                // Dispatch, then only advance 'options' to the new continuation on success.
                return this.dispatchChanges(documentFeedResponse, continuationState)
                    .doOnError(throwable -> logger.debug(
                        "Exception was thrown from thread {}", Thread.currentThread().getId(), throwable))
                    .doOnSuccess((Void) -> {
                        this.options = CosmosChangeFeedRequestOptions
                            .createForProcessingFromContinuation(continuationToken);

                        if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException();
                    });
            }

            // Empty page: still advance to the new continuation.
            this.options = CosmosChangeFeedRequestOptions
                .createForProcessingFromContinuation(continuationToken);

            if (cancellationToken.isCancellationRequested()) {
                return Flux.error(new TaskCancelledException());
            }

            return Flux.empty();
        })
        .doOnComplete(() -> {
            // Restore the configured page size if it was halved by MAX_ITEM_COUNT_TOO_LARGE handling.
            if (this.options.getMaxItemCount() != this.settings.getMaxItemCount()) {
                this.options.setMaxItemCount(this.settings.getMaxItemCount());
            }
        })
        .onErrorResume(throwable -> {
            if (throwable instanceof CosmosException) {
                CosmosException clientException = (CosmosException) throwable;
                logger.warn("CosmosException: Partition {} from thread {} with owner {}",
                    this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), clientException);
                StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException);
                switch (docDbError) {
                    case PARTITION_NOT_FOUND: {
                        this.resultException = new PartitionNotFoundException(
                            "Partition not found.", this.lastServerContinuationToken);
                    }
                    break;
                    case PARTITION_SPLIT_OR_MERGE: {
                        this.resultException = new FeedRangeGoneException(
                            "Partition split.", this.lastServerContinuationToken);
                    }
                    break;
                    case UNDEFINED: {
                        this.resultException = new RuntimeException(clientException);
                    }
                    break;
                    case MAX_ITEM_COUNT_TOO_LARGE: {
                        if (this.options.getMaxItemCount() <= 1) {
                            logger.error(
                                "Cannot reduce maxItemCount further as it's already at {}",
                                this.options.getMaxItemCount(), clientException);
                            this.resultException = new RuntimeException(clientException);
                        }
                        // NOTE(review): when maxItemCount is already <= 1, execution still
                        // falls through and halves it (1 / 2 == 0); confirm whether a
                        // break/return is missing inside the if above.
                        this.options.setMaxItemCount(this.options.getMaxItemCount() / 2);
                        logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount());
                        return Flux.empty();
                    }
                    case TRANSIENT_ERROR: {
                        // Honor the server's retry-after by sleeping in 100ms slices.
                        if (clientException.getRetryAfterDuration().toMillis() > 0) {
                            Instant stopTimer = Instant.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS);
                            return Mono.just(clientException.getRetryAfterDuration().toMillis())
                                .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                                .repeat(() -> {
                                    Instant currentTime = Instant.now();
                                    return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                                }).flatMap(values -> Flux.empty());
                        }
                    }
                    break;
                    default: {
                        logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException);
                        this.resultException = new RuntimeException(clientException);
                    }
                }
            } else if (throwable instanceof LeaseLostException) {
                logger.info("LeaseLoseException with Partition {} from thread {} with owner {}",
                    this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner());
                this.resultException = (LeaseLostException) throwable;
            } else if (throwable instanceof TaskCancelledException) {
                logger.debug("Task cancelled exception: Partition {} from thread {} with owner {}",
                    this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable);
                this.resultException = (TaskCancelledException) throwable;
            } else {
                logger.warn("Unexpected exception: Partition {} from thread {} with owner {}",
                    this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable);
                this.resultException = new RuntimeException(throwable);
            }
            // Re-raise so the outer onErrorResume/repeat can decide whether to continue.
            return Flux.error(throwable);
        })
        .repeat(() -> {
            // Loop until cancellation; cancellation is surfaced as a TaskCancelledException result.
            if (cancellationToken.isCancellationRequested()) {
                this.resultException = new TaskCancelledException();
                return false;
            }
            return true;
        })
        .onErrorResume(throwable -> {
            // Terminal safety net: make sure a failure is always recorded before completing.
            if (this.resultException == null) {
                this.resultException = new RuntimeException(throwable);
            }
            return Flux.empty();
        })
        .then()
        .doFinally(
            any -> {
                logger.info("Partition {}: processing task exited with owner {}.",
                    this.lease.getLeaseToken(), this.lease.getOwner());
            });
}
if (throughputControlGroupConfigForFeedRange != null) {
/**
 * Runs the change feed processing loop for this partition until cancellation or an
 * unrecoverable error. One iteration = poll the change feed once, dispatch any changes
 * to the observer, then either continue immediately (more results pending) or back off
 * for the configured feed poll delay. Errors are classified and recorded into
 * {@code resultException}; the loop itself is driven by the trailing repeat().
 */
public Mono<Void> run(CancellationToken cancellationToken) {
    logger.info("Partition {}: processing task started with owner {}.",
        this.lease.getLeaseToken(), this.lease.getOwner());
    this.hasMoreResults = true;
    this.checkpointer.setCancellationToken(cancellationToken);
    // Resolved once per run; applied to every request below so throughput control
    // survives the options being reconstructed after each response.
    ThroughputControlGroupConfig throughputControlGroupConfigForFeedRange =
        this.tryGetThroughputControlConfigForFeedRange(this.lease);

    return Flux.just(this)
        .flatMap(value -> {
            if (cancellationToken.isCancellationRequested()) {
                return Flux.empty();
            }
            // More results and no recorded failure: proceed immediately.
            if (this.hasMoreResults && this.resultException == null) {
                return Flux.just(value);
            }
            // Otherwise poll in 100ms slices until the feed poll delay elapses
            // (or cancellation is requested), then emit once.
            Instant stopTimer = Instant.now().plus(this.settings.getFeedPollDelay());
            return Mono.just(value)
                .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                .repeat(() -> {
                    Instant currentTime = Instant.now();
                    return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                }).last();
        })
        .flatMap(value -> {
            if (throughputControlGroupConfigForFeedRange != null) {
                // Re-apply on every request: 'options' is rebuilt from the continuation
                // after each response, which drops the group name.
                this.options.setThroughputControlGroupName(throughputControlGroupConfigForFeedRange.getGroupName());
            }
            // limitRequest(1): exactly one page per loop iteration.
            return this.documentClient.createDocumentChangeFeedQuery(
                this.settings.getCollectionSelfLink(),
                this.options,
                JsonNode.class).limitRequest(1);
        })
        .flatMap(documentFeedResponse -> {
            if (cancellationToken.isCancellationRequested()) return Flux.error(new TaskCancelledException());

            final String continuationToken = documentFeedResponse.getContinuationToken();
            final ChangeFeedState continuationState = ChangeFeedState.fromString(continuationToken);
            checkNotNull(continuationState, "Argument 'continuationState' must not be null.");
            checkArgument(
                continuationState
                    .getContinuation()
                    .getContinuationTokenCount() == 1,
                "For ChangeFeedProcessor the continuation state should always have one range/continuation");

            // Remembered so error recovery (split/not-found) can resume from here.
            this.lastServerContinuationToken = continuationState
                .getContinuation()
                .getCurrentContinuationToken()
                .getToken();
            this.hasMoreResults = !ModelBridgeInternal.noChanges(documentFeedResponse);

            if (documentFeedResponse.getResults() != null && documentFeedResponse.getResults().size() > 0) {
                logger.info("Partition {}: processing {} feeds with owner {}.",
                    this.lease.getLeaseToken(), documentFeedResponse.getResults().size(), this.lease.getOwner());

                // Dispatch, then only advance 'options' to the new continuation on success.
                return this.dispatchChanges(documentFeedResponse, continuationState)
                    .doOnError(throwable -> logger.debug(
                        "Exception was thrown from thread {}", Thread.currentThread().getId(), throwable))
                    .doOnSuccess((Void) -> {
                        this.options = CosmosChangeFeedRequestOptions
                            .createForProcessingFromContinuation(continuationToken);

                        if (cancellationToken.isCancellationRequested()) throw new TaskCancelledException();
                    });
            }

            // Empty page: still advance to the new continuation.
            this.options = CosmosChangeFeedRequestOptions
                .createForProcessingFromContinuation(continuationToken);

            if (cancellationToken.isCancellationRequested()) {
                return Flux.error(new TaskCancelledException());
            }

            return Flux.empty();
        })
        .doOnComplete(() -> {
            // Restore the configured page size if it was halved by MAX_ITEM_COUNT_TOO_LARGE handling.
            if (this.options.getMaxItemCount() != this.settings.getMaxItemCount()) {
                this.options.setMaxItemCount(this.settings.getMaxItemCount());
            }
        })
        .onErrorResume(throwable -> {
            if (throwable instanceof CosmosException) {
                CosmosException clientException = (CosmosException) throwable;
                logger.warn("CosmosException: Partition {} from thread {} with owner {}",
                    this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), clientException);
                StatusCodeErrorType docDbError = ExceptionClassifier.classifyClientException(clientException);
                switch (docDbError) {
                    case PARTITION_NOT_FOUND: {
                        this.resultException = new PartitionNotFoundException(
                            "Partition not found.", this.lastServerContinuationToken);
                    }
                    break;
                    case PARTITION_SPLIT_OR_MERGE: {
                        this.resultException = new FeedRangeGoneException(
                            "Partition split.", this.lastServerContinuationToken);
                    }
                    break;
                    case UNDEFINED: {
                        this.resultException = new RuntimeException(clientException);
                    }
                    break;
                    case MAX_ITEM_COUNT_TOO_LARGE: {
                        if (this.options.getMaxItemCount() <= 1) {
                            logger.error(
                                "Cannot reduce maxItemCount further as it's already at {}",
                                this.options.getMaxItemCount(), clientException);
                            this.resultException = new RuntimeException(clientException);
                        }
                        // NOTE(review): when maxItemCount is already <= 1, execution still
                        // falls through and halves it (1 / 2 == 0); confirm whether a
                        // break/return is missing inside the if above.
                        this.options.setMaxItemCount(this.options.getMaxItemCount() / 2);
                        logger.warn("Reducing maxItemCount, new value: {}", this.options.getMaxItemCount());
                        return Flux.empty();
                    }
                    case TRANSIENT_ERROR: {
                        // Honor the server's retry-after by sleeping in 100ms slices.
                        if (clientException.getRetryAfterDuration().toMillis() > 0) {
                            Instant stopTimer = Instant.now().plus(clientException.getRetryAfterDuration().toMillis(), MILLIS);
                            return Mono.just(clientException.getRetryAfterDuration().toMillis())
                                .delayElement(Duration.ofMillis(100), CosmosSchedulers.COSMOS_PARALLEL)
                                .repeat(() -> {
                                    Instant currentTime = Instant.now();
                                    return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer);
                                }).flatMap(values -> Flux.empty());
                        }
                    }
                    break;
                    default: {
                        logger.error("Unrecognized Cosmos exception returned error code {}", docDbError, clientException);
                        this.resultException = new RuntimeException(clientException);
                    }
                }
            } else if (throwable instanceof LeaseLostException) {
                logger.info("LeaseLoseException with Partition {} from thread {} with owner {}",
                    this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner());
                this.resultException = (LeaseLostException) throwable;
            } else if (throwable instanceof TaskCancelledException) {
                logger.debug("Task cancelled exception: Partition {} from thread {} with owner {}",
                    this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable);
                this.resultException = (TaskCancelledException) throwable;
            } else {
                logger.warn("Unexpected exception: Partition {} from thread {} with owner {}",
                    this.lease.getLeaseToken(), Thread.currentThread().getId(), this.lease.getOwner(), throwable);
                this.resultException = new RuntimeException(throwable);
            }
            // Re-raise so the outer onErrorResume/repeat can decide whether to continue.
            return Flux.error(throwable);
        })
        .repeat(() -> {
            // Loop until cancellation; cancellation is surfaced as a TaskCancelledException result.
            if (cancellationToken.isCancellationRequested()) {
                this.resultException = new TaskCancelledException();
                return false;
            }
            return true;
        })
        .onErrorResume(throwable -> {
            // Terminal safety net: make sure a failure is always recorded before completing.
            if (this.resultException == null) {
                this.resultException = new RuntimeException(throwable);
            }
            return Flux.empty();
        })
        .then()
        .doFinally(
            any -> {
                logger.info("Partition {}: processing task exited with owner {}.",
                    this.lease.getLeaseToken(), this.lease.getOwner());
            });
}
class PartitionProcessorImpl implements PartitionProcessor { private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class); private final ProcessorSettings settings; private final PartitionCheckpointer checkpointer; private final ChangeFeedObserver<JsonNode> observer; private volatile CosmosChangeFeedRequestOptions options; private final ChangeFeedContextClient documentClient; private final Lease lease; private volatile RuntimeException resultException; private volatile String lastServerContinuationToken; private volatile boolean hasMoreResults; private final FeedRangeThroughputControlConfigManager feedRangeThroughputControlConfigManager; public PartitionProcessorImpl(ChangeFeedObserver<JsonNode> observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkPointer, Lease lease, FeedRangeThroughputControlConfigManager feedRangeThroughputControlConfigManager) { this.observer = observer; this.documentClient = documentClient; this.settings = settings; this.checkpointer = checkPointer; this.lease = lease; ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); this.options.setMaxItemCount(settings.getMaxItemCount()); ImplementationBridgeHelpers.CosmosChangeFeedRequestOptionsHelper.getCosmosChangeFeedRequestOptionsAccessor() .setHeader( this.options, HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, String.valueOf(HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES_NONE)); this.feedRangeThroughputControlConfigManager = feedRangeThroughputControlConfigManager; } @Override private FeedRangePartitionKeyRangeImpl getPkRangeFeedRangeFromStartState() { final FeedRangeInternal feedRange = this.settings.getStartState().getFeedRange(); checkNotNull(feedRange, "FeedRange must not be null here."); checkArgument( feedRange instanceof FeedRangePartitionKeyRangeImpl, "FeedRange must be a PkRangeId FeedRange when 
using Lease V1 contract."); return (FeedRangePartitionKeyRangeImpl)feedRange; } private ThroughputControlGroupConfig tryGetThroughputControlConfigForFeedRange(Lease lease) { if (this.feedRangeThroughputControlConfigManager == null) { return null; } return this.feedRangeThroughputControlConfigManager.getThroughputControlConfigForFeedRange(lease.getFeedRange()); } @Override public RuntimeException getResultException() { return this.resultException; } private Mono<Void> dispatchChanges( FeedResponse<JsonNode> response, ChangeFeedState continuationState) { ChangeFeedObserverContext<JsonNode> context = new ChangeFeedObserverContextImpl<>( this.getPkRangeFeedRangeFromStartState().getPartitionKeyRangeId(), response, continuationState, this.checkpointer); return this.observer.processChanges(context, response.getResults()); } }
class PartitionProcessorImpl implements PartitionProcessor { private static final Logger logger = LoggerFactory.getLogger(PartitionProcessorImpl.class); private final ProcessorSettings settings; private final PartitionCheckpointer checkpointer; private final ChangeFeedObserver<JsonNode> observer; private volatile CosmosChangeFeedRequestOptions options; private final ChangeFeedContextClient documentClient; private final Lease lease; private volatile RuntimeException resultException; private volatile String lastServerContinuationToken; private volatile boolean hasMoreResults; private final FeedRangeThroughputControlConfigManager feedRangeThroughputControlConfigManager; public PartitionProcessorImpl(ChangeFeedObserver<JsonNode> observer, ChangeFeedContextClient documentClient, ProcessorSettings settings, PartitionCheckpointer checkPointer, Lease lease, FeedRangeThroughputControlConfigManager feedRangeThroughputControlConfigManager) { this.observer = observer; this.documentClient = documentClient; this.settings = settings; this.checkpointer = checkPointer; this.lease = lease; ChangeFeedState state = settings.getStartState(); this.options = ModelBridgeInternal.createChangeFeedRequestOptionsForChangeFeedState(state); this.options.setMaxItemCount(settings.getMaxItemCount()); ImplementationBridgeHelpers.CosmosChangeFeedRequestOptionsHelper.getCosmosChangeFeedRequestOptionsAccessor() .setHeader( this.options, HttpConstants.HttpHeaders.SDK_SUPPORTED_CAPABILITIES, String.valueOf(HttpConstants.SDKSupportedCapabilities.SUPPORTED_CAPABILITIES_NONE)); this.feedRangeThroughputControlConfigManager = feedRangeThroughputControlConfigManager; } @Override private FeedRangePartitionKeyRangeImpl getPkRangeFeedRangeFromStartState() { final FeedRangeInternal feedRange = this.settings.getStartState().getFeedRange(); checkNotNull(feedRange, "FeedRange must not be null here."); checkArgument( feedRange instanceof FeedRangePartitionKeyRangeImpl, "FeedRange must be a PkRangeId FeedRange when 
using Lease V1 contract."); return (FeedRangePartitionKeyRangeImpl)feedRange; } private ThroughputControlGroupConfig tryGetThroughputControlConfigForFeedRange(Lease lease) { if (this.feedRangeThroughputControlConfigManager == null) { return null; } return this.feedRangeThroughputControlConfigManager.getThroughputControlConfigForFeedRange(lease.getFeedRange()); } @Override public RuntimeException getResultException() { return this.resultException; } private Mono<Void> dispatchChanges( FeedResponse<JsonNode> response, ChangeFeedState continuationState) { ChangeFeedObserverContext<JsonNode> context = new ChangeFeedObserverContextImpl<>( this.getPkRangeFeedRangeFromStartState().getPartitionKeyRangeId(), response, continuationState, this.checkpointer); return this.observer.processChanges(context, response.getResults()); } }
Any reason for the change from `Exception` to `Throwable`, while also adding `Exception` as a checked exception to `runInternal`?
/**
 * Executes one synchronous scenario iteration inside a tracing span and records
 * the outcome (success / mismatch / cancellation / failure) via the telemetry helper.
 */
public void run() {
    Context span = telemetryHelper.getTracer().start("run", Context.NONE);
    try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) {
        if (runInternal(span)) {
            telemetryHelper.trackSuccess(span);
        } else {
            telemetryHelper.trackMismatch(span);
        }
    } catch (Throwable e) {
        // Fix: check exception types before touching getMessage() — cancellation-style
        // exceptions (notably InterruptedException) often carry a null message, and the
        // original message-first check would NPE here instead of tracking the outcome.
        if (e instanceof InterruptedException
            || e instanceof TimeoutException
            || (e.getMessage() != null && e.getMessage().contains("Timeout on blocking read"))) {
            if (e instanceof InterruptedException) {
                // Restore the interrupt status we just swallowed.
                Thread.currentThread().interrupt();
            }
            telemetryHelper.trackCancellation(span);
        } else {
            telemetryHelper.trackFailure(span, e);
        }
    }
}
} catch (Throwable e) {
public void run() { Context span = telemetryHelper.getTracer().start("run", Context.NONE); try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) { if (runInternal(span)) { telemetryHelper.trackSuccess(span); } else { telemetryHelper.trackMismatch(span); } } catch (Throwable e) { if (e.getMessage().contains("Timeout on blocking read") || e instanceof InterruptedException || e instanceof TimeoutException) { telemetryHelper.trackCancellation(span); } else { telemetryHelper.trackFailure(span, e); } } }
class BlobScenarioBase<TOptions extends StorageStressOptions> extends PerfStressTest<TOptions> { private static final String CONTAINER_NAME = "stress-" + UUID.randomUUID(); private final BlobServiceClient syncClient; private final BlobServiceAsyncClient asyncClient; private final BlobServiceAsyncClient asyncNoFaultClient; private final BlobContainerClient syncContainerClient; private final BlobContainerAsyncClient asyncContainerClient; private final BlobContainerAsyncClient asyncNoFaultContainerClient; private final TelemetryHelper telemetryHelper; public BlobScenarioBase(TOptions options, TelemetryHelper telemetryHelper) { super(options); this.telemetryHelper = telemetryHelper; String connectionString = options.getConnectionString(); Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); BlobServiceClientBuilder clientBuilder = new BlobServiceClientBuilder() .connectionString(connectionString) .httpLogOptions(getLogOptions()); asyncNoFaultClient = clientBuilder.buildAsyncClient(); if (options.isFaultInjectionEnabled()) { clientBuilder.httpClient(new HttpFaultInjectingHttpClient( HttpClient.createDefault(), false, getFaultProbabilities())); } syncClient = clientBuilder.buildClient(); asyncClient = clientBuilder.buildAsyncClient(); asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME); syncContainerClient = syncClient.getBlobContainerClient(CONTAINER_NAME); asyncContainerClient = asyncClient.getBlobContainerAsyncClient(CONTAINER_NAME); } @Override public Mono<Void> globalSetupAsync() { telemetryHelper.logStart(options); return super.globalSetupAsync() .then(asyncNoFaultContainerClient.createIfNotExists()) .then(); } @Override public Mono<Void> globalCleanupAsync() { telemetryHelper.logEnd(); return asyncNoFaultContainerClient.deleteIfExists() .then(super.globalCleanupAsync()); } @SuppressWarnings("try") @Override @SuppressWarnings("try") @Override public Mono<Void> runAsync() { Context span = 
telemetryHelper.getTracer().start("runAsync", Context.NONE); try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) { return runInternalAsync(span) .doOnCancel(() -> telemetryHelper.trackCancellation(span)) .doOnError(e -> telemetryHelper.trackFailure(span, e)) .doOnNext(match -> { if (match) { telemetryHelper.trackSuccess(span); } else { telemetryHelper.trackMismatch(span); } }) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", span)) .then() .onErrorResume(e -> Mono.empty()); } catch (Throwable e) { return Mono.empty(); } } protected abstract boolean runInternal(Context context) throws Exception; protected abstract Mono<Boolean> runInternalAsync(Context context); protected BlobContainerClient getSyncContainerClient() { return syncContainerClient; } protected BlobContainerAsyncClient getAsyncContainerClient() { return asyncContainerClient; } protected BlobContainerAsyncClient getAsyncContainerClientNoFault() { return asyncNoFaultContainerClient; } private static HttpLogOptions getLogOptions() { return new HttpLogOptions() .setLogLevel(HttpLogDetailLevel.HEADERS) .addAllowedHeaderName("x-ms-faultinjector-response-option") .addAllowedHeaderName("Content-Range") .addAllowedHeaderName("Accept-Ranges") .addAllowedHeaderName("x-ms-blob-content-md5") .addAllowedHeaderName("x-ms-error-code") .addAllowedHeaderName("x-ms-range"); } private static FaultInjectionProbabilities getFaultProbabilities() { return new FaultInjectionProbabilities() .setNoResponseIndefinite(0.003D) .setNoResponseClose(0.004D) .setNoResponseAbort(0.003D) .setPartialResponseIndefinite(0.06) .setPartialResponseClose(0.06) .setPartialResponseAbort(0.06) .setPartialResponseFinishNormal(0.06); } }
class BlobScenarioBase<TOptions extends StorageStressOptions> extends PerfStressTest<TOptions> { private static final String CONTAINER_NAME = "stress-" + UUID.randomUUID(); private final BlobServiceClient syncClient; private final BlobServiceAsyncClient asyncClient; private final BlobServiceAsyncClient asyncNoFaultClient; private final BlobContainerClient syncContainerClient; private final BlobContainerAsyncClient asyncContainerClient; private final BlobContainerAsyncClient asyncNoFaultContainerClient; private final TelemetryHelper telemetryHelper; public BlobScenarioBase(TOptions options, TelemetryHelper telemetryHelper) { super(options); this.telemetryHelper = telemetryHelper; String connectionString = options.getConnectionString(); Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); BlobServiceClientBuilder clientBuilder = new BlobServiceClientBuilder() .connectionString(connectionString) .httpLogOptions(getLogOptions()); asyncNoFaultClient = clientBuilder.buildAsyncClient(); if (options.isFaultInjectionEnabled()) { clientBuilder.httpClient(new HttpFaultInjectingHttpClient( HttpClient.createDefault(), false, getFaultProbabilities())); } syncClient = clientBuilder.buildClient(); asyncClient = clientBuilder.buildAsyncClient(); asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME); syncContainerClient = syncClient.getBlobContainerClient(CONTAINER_NAME); asyncContainerClient = asyncClient.getBlobContainerAsyncClient(CONTAINER_NAME); } @Override public Mono<Void> globalSetupAsync() { telemetryHelper.logStart(options); return super.globalSetupAsync() .then(asyncNoFaultContainerClient.createIfNotExists()) .then(); } @Override public Mono<Void> globalCleanupAsync() { telemetryHelper.logEnd(); return asyncNoFaultContainerClient.deleteIfExists() .then(super.globalCleanupAsync()); } @SuppressWarnings("try") @Override @SuppressWarnings("try") @Override public Mono<Void> runAsync() { Context span = 
telemetryHelper.getTracer().start("runAsync", Context.NONE); try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) { return runInternalAsync(span) .doOnCancel(() -> telemetryHelper.trackCancellation(span)) .doOnError(e -> telemetryHelper.trackFailure(span, e)) .doOnNext(match -> { if (match) { telemetryHelper.trackSuccess(span); } else { telemetryHelper.trackMismatch(span); } }) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", span)) .then() .onErrorResume(e -> Mono.empty()); } catch (Throwable e) { return Mono.empty(); } } protected abstract boolean runInternal(Context context) throws Exception; protected abstract Mono<Boolean> runInternalAsync(Context context); protected BlobContainerClient getSyncContainerClient() { return syncContainerClient; } protected BlobContainerAsyncClient getAsyncContainerClient() { return asyncContainerClient; } protected BlobContainerAsyncClient getAsyncContainerClientNoFault() { return asyncNoFaultContainerClient; } private static HttpLogOptions getLogOptions() { return new HttpLogOptions() .setLogLevel(HttpLogDetailLevel.HEADERS) .addAllowedHeaderName("x-ms-faultinjector-response-option") .addAllowedHeaderName("Content-Range") .addAllowedHeaderName("Accept-Ranges") .addAllowedHeaderName("x-ms-blob-content-md5") .addAllowedHeaderName("x-ms-error-code") .addAllowedHeaderName("x-ms-range"); } private static FaultInjectionProbabilities getFaultProbabilities() { return new FaultInjectionProbabilities() .setNoResponseIndefinite(0.003D) .setNoResponseClose(0.004D) .setNoResponseAbort(0.003D) .setPartialResponseIndefinite(0.06) .setPartialResponseClose(0.06) .setPartialResponseAbort(0.06) .setPartialResponseFinishNormal(0.06); } }
Any reason for closing here while we're using a try-with-resource block which will close this after we leave the scope?
protected boolean runInternal(Context span) throws IOException { try (CrcOutputStream outputStream = new CrcOutputStream()) { syncClient.downloadStreamWithResponse(outputStream, null, null, null, false, null, span); outputStream.close(); return ORIGINAL_CONTENT.checkMatch(outputStream.getContentInfo(), span).block(); } }
outputStream.close();
protected boolean runInternal(Context span) throws IOException { try (CrcOutputStream outputStream = new CrcOutputStream()) { syncClient.downloadStreamWithResponse(outputStream, null, null, null, false, null, span); outputStream.close(); return ORIGINAL_CONTENT.checkMatch(outputStream.getContentInfo(), span).block(); } }
class DownloadStream extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(DownloadStream.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadStream.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncClient; private final BlobAsyncClient asyncNoFaultClient; public DownloadStream(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return asyncClient.downloadStreamWithResponse(null, null, null, false) .flatMap(response -> ORIGINAL_CONTENT.checkMatch(response.getValue(), span)); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
class DownloadStream extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(DownloadStream.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadStream.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncClient; private final BlobAsyncClient asyncNoFaultClient; public DownloadStream(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return asyncClient.downloadStreamWithResponse(null, null, null, false) .flatMap(response -> ORIGINAL_CONTENT.checkMatch(response.getValue(), span)); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
Isn't `close` checked with IOException by default? Do we need the try-catch here?
public void close() { try { inputStream.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }
inputStream.close();
public void close() { try { inputStream.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }
class CrcInputStream extends InputStream { private final static ClientLogger LOGGER = new ClientLogger(CrcInputStream.class); private final Sinks.One<ContentInfo> sink = Sinks.one(); private final InputStream inputStream; private final CRC32 crc = new CRC32(); private final byte[] head = new byte[1024]; private long length = 0; public CrcInputStream(BinaryData source, long size) { this.inputStream = new RepeatingInputStream(source, size); } public CrcInputStream(InputStream source) { this.inputStream = source; } @Override public synchronized int read() throws IOException { try { int b = inputStream.read(); if (b >= 0) { crc.update(b); if (length < head.length) { head[(int) length] = (byte) b; } length++; } if (b == -1) { sink.emitValue(new ContentInfo(crc.getValue(), length, head), Sinks.EmitFailureHandler.FAIL_FAST); } return b; } catch (IOException e) { sink.emitError(e, Sinks.EmitFailureHandler.FAIL_FAST); throw LOGGER.logThrowableAsError(e); } } @Override public synchronized int read(byte b[], int off, int len) throws IOException { try { int read = inputStream.read(b, off, len); if (read > 0) { crc.update(b, off, read); if (length < head.length) { System.arraycopy(b, off, head, (int)length, Math.min(read, head.length - (int)length)); } length += read; } if (read == -1) { sink.emitValue(new ContentInfo(crc.getValue(), length, head), Sinks.EmitFailureHandler.FAIL_FAST); } return read; } catch (IOException e) { sink.emitError(e, Sinks.EmitFailureHandler.FAIL_FAST); throw LOGGER.logThrowableAsError(e); } } public Mono<ContentInfo> getContentInfo() { return sink.asMono(); } @Override }
class CrcInputStream extends InputStream { private final static ClientLogger LOGGER = new ClientLogger(CrcInputStream.class); private final Sinks.One<ContentInfo> sink = Sinks.one(); private final InputStream inputStream; private final CRC32 crc = new CRC32(); private final ByteBuffer head = ByteBuffer.allocate(1024); private long length = 0; public CrcInputStream(BinaryData source, long size) { this.inputStream = new RepeatingInputStream(source, size); } public CrcInputStream(InputStream source) { this.inputStream = source; } @Override public synchronized int read() throws IOException { try { int b = inputStream.read(); if (b >= 0) { crc.update(b); if (head.hasRemaining()) { head.put((byte) b); } length++; } else { sink.emitValue(new ContentInfo(crc.getValue(), length, head), Sinks.EmitFailureHandler.FAIL_FAST); } return b; } catch (IOException e) { sink.emitError(e, Sinks.EmitFailureHandler.FAIL_FAST); throw LOGGER.logThrowableAsError(e); } } @Override public synchronized int read(byte buf[], int off, int len) throws IOException { try { int read = inputStream.read(buf, off, len); if (read > 0) { length += read; crc.update(buf, off, read); if (head.hasRemaining()) { head.put(buf, off, Math.min(read, head.remaining())); } } else { sink.emitValue(new ContentInfo(crc.getValue(), length, head), Sinks.EmitFailureHandler.FAIL_FAST); } return read; } catch (IOException e) { sink.emitError(e, Sinks.EmitFailureHandler.FAIL_FAST); throw LOGGER.logThrowableAsError(e); } } public Mono<ContentInfo> getContentInfo() { return sink.asMono(); } @Override }
want to catch all `RuntimeExceptions` here and record them - we had some mystery around OOM going on here which was not caught/reported
public void run() { Context span = telemetryHelper.getTracer().start("run", Context.NONE); try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) { if (runInternal(span)) { telemetryHelper.trackSuccess(span); } else { telemetryHelper.trackMismatch(span); } } catch (Throwable e) { if (e.getMessage().contains("Timeout on blocking read") || e instanceof InterruptedException || e instanceof TimeoutException) { telemetryHelper.trackCancellation(span); } else { telemetryHelper.trackFailure(span, e); } } }
} catch (Throwable e) {
public void run() { Context span = telemetryHelper.getTracer().start("run", Context.NONE); try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) { if (runInternal(span)) { telemetryHelper.trackSuccess(span); } else { telemetryHelper.trackMismatch(span); } } catch (Throwable e) { if (e.getMessage().contains("Timeout on blocking read") || e instanceof InterruptedException || e instanceof TimeoutException) { telemetryHelper.trackCancellation(span); } else { telemetryHelper.trackFailure(span, e); } } }
class BlobScenarioBase<TOptions extends StorageStressOptions> extends PerfStressTest<TOptions> { private static final String CONTAINER_NAME = "stress-" + UUID.randomUUID(); private final BlobServiceClient syncClient; private final BlobServiceAsyncClient asyncClient; private final BlobServiceAsyncClient asyncNoFaultClient; private final BlobContainerClient syncContainerClient; private final BlobContainerAsyncClient asyncContainerClient; private final BlobContainerAsyncClient asyncNoFaultContainerClient; private final TelemetryHelper telemetryHelper; public BlobScenarioBase(TOptions options, TelemetryHelper telemetryHelper) { super(options); this.telemetryHelper = telemetryHelper; String connectionString = options.getConnectionString(); Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); BlobServiceClientBuilder clientBuilder = new BlobServiceClientBuilder() .connectionString(connectionString) .httpLogOptions(getLogOptions()); asyncNoFaultClient = clientBuilder.buildAsyncClient(); if (options.isFaultInjectionEnabled()) { clientBuilder.httpClient(new HttpFaultInjectingHttpClient( HttpClient.createDefault(), false, getFaultProbabilities())); } syncClient = clientBuilder.buildClient(); asyncClient = clientBuilder.buildAsyncClient(); asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME); syncContainerClient = syncClient.getBlobContainerClient(CONTAINER_NAME); asyncContainerClient = asyncClient.getBlobContainerAsyncClient(CONTAINER_NAME); } @Override public Mono<Void> globalSetupAsync() { telemetryHelper.logStart(options); return super.globalSetupAsync() .then(asyncNoFaultContainerClient.createIfNotExists()) .then(); } @Override public Mono<Void> globalCleanupAsync() { telemetryHelper.logEnd(); return asyncNoFaultContainerClient.deleteIfExists() .then(super.globalCleanupAsync()); } @SuppressWarnings("try") @Override @SuppressWarnings("try") @Override public Mono<Void> runAsync() { Context span = 
telemetryHelper.getTracer().start("runAsync", Context.NONE); try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) { return runInternalAsync(span) .doOnCancel(() -> telemetryHelper.trackCancellation(span)) .doOnError(e -> telemetryHelper.trackFailure(span, e)) .doOnNext(match -> { if (match) { telemetryHelper.trackSuccess(span); } else { telemetryHelper.trackMismatch(span); } }) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", span)) .then() .onErrorResume(e -> Mono.empty()); } catch (Throwable e) { return Mono.empty(); } } protected abstract boolean runInternal(Context context) throws Exception; protected abstract Mono<Boolean> runInternalAsync(Context context); protected BlobContainerClient getSyncContainerClient() { return syncContainerClient; } protected BlobContainerAsyncClient getAsyncContainerClient() { return asyncContainerClient; } protected BlobContainerAsyncClient getAsyncContainerClientNoFault() { return asyncNoFaultContainerClient; } private static HttpLogOptions getLogOptions() { return new HttpLogOptions() .setLogLevel(HttpLogDetailLevel.HEADERS) .addAllowedHeaderName("x-ms-faultinjector-response-option") .addAllowedHeaderName("Content-Range") .addAllowedHeaderName("Accept-Ranges") .addAllowedHeaderName("x-ms-blob-content-md5") .addAllowedHeaderName("x-ms-error-code") .addAllowedHeaderName("x-ms-range"); } private static FaultInjectionProbabilities getFaultProbabilities() { return new FaultInjectionProbabilities() .setNoResponseIndefinite(0.003D) .setNoResponseClose(0.004D) .setNoResponseAbort(0.003D) .setPartialResponseIndefinite(0.06) .setPartialResponseClose(0.06) .setPartialResponseAbort(0.06) .setPartialResponseFinishNormal(0.06); } }
class BlobScenarioBase<TOptions extends StorageStressOptions> extends PerfStressTest<TOptions> { private static final String CONTAINER_NAME = "stress-" + UUID.randomUUID(); private final BlobServiceClient syncClient; private final BlobServiceAsyncClient asyncClient; private final BlobServiceAsyncClient asyncNoFaultClient; private final BlobContainerClient syncContainerClient; private final BlobContainerAsyncClient asyncContainerClient; private final BlobContainerAsyncClient asyncNoFaultContainerClient; private final TelemetryHelper telemetryHelper; public BlobScenarioBase(TOptions options, TelemetryHelper telemetryHelper) { super(options); this.telemetryHelper = telemetryHelper; String connectionString = options.getConnectionString(); Objects.requireNonNull(connectionString, "'connectionString' cannot be null."); BlobServiceClientBuilder clientBuilder = new BlobServiceClientBuilder() .connectionString(connectionString) .httpLogOptions(getLogOptions()); asyncNoFaultClient = clientBuilder.buildAsyncClient(); if (options.isFaultInjectionEnabled()) { clientBuilder.httpClient(new HttpFaultInjectingHttpClient( HttpClient.createDefault(), false, getFaultProbabilities())); } syncClient = clientBuilder.buildClient(); asyncClient = clientBuilder.buildAsyncClient(); asyncNoFaultContainerClient = asyncNoFaultClient.getBlobContainerAsyncClient(CONTAINER_NAME); syncContainerClient = syncClient.getBlobContainerClient(CONTAINER_NAME); asyncContainerClient = asyncClient.getBlobContainerAsyncClient(CONTAINER_NAME); } @Override public Mono<Void> globalSetupAsync() { telemetryHelper.logStart(options); return super.globalSetupAsync() .then(asyncNoFaultContainerClient.createIfNotExists()) .then(); } @Override public Mono<Void> globalCleanupAsync() { telemetryHelper.logEnd(); return asyncNoFaultContainerClient.deleteIfExists() .then(super.globalCleanupAsync()); } @SuppressWarnings("try") @Override @SuppressWarnings("try") @Override public Mono<Void> runAsync() { Context span = 
telemetryHelper.getTracer().start("runAsync", Context.NONE); try (AutoCloseable s = telemetryHelper.getTracer().makeSpanCurrent(span)) { return runInternalAsync(span) .doOnCancel(() -> telemetryHelper.trackCancellation(span)) .doOnError(e -> telemetryHelper.trackFailure(span, e)) .doOnNext(match -> { if (match) { telemetryHelper.trackSuccess(span); } else { telemetryHelper.trackMismatch(span); } }) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", span)) .then() .onErrorResume(e -> Mono.empty()); } catch (Throwable e) { return Mono.empty(); } } protected abstract boolean runInternal(Context context) throws Exception; protected abstract Mono<Boolean> runInternalAsync(Context context); protected BlobContainerClient getSyncContainerClient() { return syncContainerClient; } protected BlobContainerAsyncClient getAsyncContainerClient() { return asyncContainerClient; } protected BlobContainerAsyncClient getAsyncContainerClientNoFault() { return asyncNoFaultContainerClient; } private static HttpLogOptions getLogOptions() { return new HttpLogOptions() .setLogLevel(HttpLogDetailLevel.HEADERS) .addAllowedHeaderName("x-ms-faultinjector-response-option") .addAllowedHeaderName("Content-Range") .addAllowedHeaderName("Accept-Ranges") .addAllowedHeaderName("x-ms-blob-content-md5") .addAllowedHeaderName("x-ms-error-code") .addAllowedHeaderName("x-ms-range"); } private static FaultInjectionProbabilities getFaultProbabilities() { return new FaultInjectionProbabilities() .setNoResponseIndefinite(0.003D) .setNoResponseClose(0.004D) .setNoResponseAbort(0.003D) .setPartialResponseIndefinite(0.06) .setPartialResponseClose(0.06) .setPartialResponseAbort(0.06) .setPartialResponseFinishNormal(0.06); } }
The `close` is the only way to tell we're done writing to this stream, and that crc/length/data is all there - we'll need it to be over in the `ORIGINAL_CONTENT.checkMatch`
protected boolean runInternal(Context span) throws IOException { try (CrcOutputStream outputStream = new CrcOutputStream()) { syncClient.downloadStreamWithResponse(outputStream, null, null, null, false, null, span); outputStream.close(); return ORIGINAL_CONTENT.checkMatch(outputStream.getContentInfo(), span).block(); } }
outputStream.close();
protected boolean runInternal(Context span) throws IOException { try (CrcOutputStream outputStream = new CrcOutputStream()) { syncClient.downloadStreamWithResponse(outputStream, null, null, null, false, null, span); outputStream.close(); return ORIGINAL_CONTENT.checkMatch(outputStream.getContentInfo(), span).block(); } }
class DownloadStream extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(DownloadStream.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadStream.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncClient; private final BlobAsyncClient asyncNoFaultClient; public DownloadStream(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return asyncClient.downloadStreamWithResponse(null, null, null, false) .flatMap(response -> ORIGINAL_CONTENT.checkMatch(response.getValue(), span)); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
class DownloadStream extends BlobScenarioBase<StorageStressOptions> { private static final ClientLogger LOGGER = new ClientLogger(DownloadStream.class); private static final TelemetryHelper TELEMETRY_HELPER = new TelemetryHelper(DownloadStream.class); private static final OriginalContent ORIGINAL_CONTENT = new OriginalContent(); private final BlobClient syncClient; private final BlobAsyncClient asyncClient; private final BlobAsyncClient asyncNoFaultClient; public DownloadStream(StorageStressOptions options) { super(options, TELEMETRY_HELPER); this.asyncNoFaultClient = getAsyncContainerClientNoFault().getBlobAsyncClient(options.getBlobName()); this.syncClient = getSyncContainerClient().getBlobClient(options.getBlobName()); this.asyncClient = getAsyncContainerClient().getBlobAsyncClient(options.getBlobName()); } @Override @Override protected Mono<Boolean> runInternalAsync(Context span) { return asyncClient.downloadStreamWithResponse(null, null, null, false) .flatMap(response -> ORIGINAL_CONTENT.checkMatch(response.getValue(), span)); } @Override public Mono<Void> globalSetupAsync() { return super.globalSetupAsync() .then(ORIGINAL_CONTENT.setupBlob(asyncNoFaultClient, options.getSize())); } @Override public Mono<Void> globalCleanupAsync() { return asyncNoFaultClient.delete() .then(super.globalCleanupAsync()); } }
it makes using it easier, e.g. I can do ```java return Mono.using( () -> new CrcInputStream(BLOB_CONTENT_HEAD, blobSize), data -> .. CrcInputStream::close) ``` If close throws, I'd have to write more boilerplate.
public void close() { try { inputStream.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }
inputStream.close();
public void close() { try { inputStream.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }
class CrcInputStream extends InputStream { private final static ClientLogger LOGGER = new ClientLogger(CrcInputStream.class); private final Sinks.One<ContentInfo> sink = Sinks.one(); private final InputStream inputStream; private final CRC32 crc = new CRC32(); private final byte[] head = new byte[1024]; private long length = 0; public CrcInputStream(BinaryData source, long size) { this.inputStream = new RepeatingInputStream(source, size); } public CrcInputStream(InputStream source) { this.inputStream = source; } @Override public synchronized int read() throws IOException { try { int b = inputStream.read(); if (b >= 0) { crc.update(b); if (length < head.length) { head[(int) length] = (byte) b; } length++; } if (b == -1) { sink.emitValue(new ContentInfo(crc.getValue(), length, head), Sinks.EmitFailureHandler.FAIL_FAST); } return b; } catch (IOException e) { sink.emitError(e, Sinks.EmitFailureHandler.FAIL_FAST); throw LOGGER.logThrowableAsError(e); } } @Override public synchronized int read(byte b[], int off, int len) throws IOException { try { int read = inputStream.read(b, off, len); if (read > 0) { crc.update(b, off, read); if (length < head.length) { System.arraycopy(b, off, head, (int)length, Math.min(read, head.length - (int)length)); } length += read; } if (read == -1) { sink.emitValue(new ContentInfo(crc.getValue(), length, head), Sinks.EmitFailureHandler.FAIL_FAST); } return read; } catch (IOException e) { sink.emitError(e, Sinks.EmitFailureHandler.FAIL_FAST); throw LOGGER.logThrowableAsError(e); } } public Mono<ContentInfo> getContentInfo() { return sink.asMono(); } @Override }
class CrcInputStream extends InputStream { private final static ClientLogger LOGGER = new ClientLogger(CrcInputStream.class); private final Sinks.One<ContentInfo> sink = Sinks.one(); private final InputStream inputStream; private final CRC32 crc = new CRC32(); private final ByteBuffer head = ByteBuffer.allocate(1024); private long length = 0; public CrcInputStream(BinaryData source, long size) { this.inputStream = new RepeatingInputStream(source, size); } public CrcInputStream(InputStream source) { this.inputStream = source; } @Override public synchronized int read() throws IOException { try { int b = inputStream.read(); if (b >= 0) { crc.update(b); if (head.hasRemaining()) { head.put((byte) b); } length++; } else { sink.emitValue(new ContentInfo(crc.getValue(), length, head), Sinks.EmitFailureHandler.FAIL_FAST); } return b; } catch (IOException e) { sink.emitError(e, Sinks.EmitFailureHandler.FAIL_FAST); throw LOGGER.logThrowableAsError(e); } } @Override public synchronized int read(byte buf[], int off, int len) throws IOException { try { int read = inputStream.read(buf, off, len); if (read > 0) { length += read; crc.update(buf, off, read); if (head.hasRemaining()) { head.put(buf, off, Math.min(read, head.remaining())); } } else { sink.emitValue(new ContentInfo(crc.getValue(), length, head), Sinks.EmitFailureHandler.FAIL_FAST); } return read; } catch (IOException e) { sink.emitError(e, Sinks.EmitFailureHandler.FAIL_FAST); throw LOGGER.logThrowableAsError(e); } } public Mono<ContentInfo> getContentInfo() { return sink.asMono(); } @Override }
nit: Azure Cognitive Search rebranded as Azure AI Search, should update references with the new name
public static void main(String[] args) { String azureOpenaiKey = "{azure-open-ai-key}"; String endpoint = "{azure-open-ai-endpoint}"; String deploymentOrModelId = "{azure-open-ai-deployment-model-id}"; OpenAIClient client = new OpenAIClientBuilder() .endpoint(endpoint) .credential(new AzureKeyCredential(azureOpenaiKey)) .buildClient(); String azureSearchEndpoint = "{azure-cognitive-search-endpoint}"; String azureSearchAdminKey = "{azure-cognitive-search-key}"; String azureSearchIndexName = "{azure-cognitive-search-index-name}"; AzureCognitiveSearchChatExtensionConfiguration cognitiveSearchConfiguration = new AzureCognitiveSearchChatExtensionConfiguration( new AzureCognitiveSearchChatExtensionParameters(azureSearchEndpoint, azureSearchIndexName) .setAuthentication(new OnYourDataApiKeyAuthenticationOptions(azureSearchAdminKey)) .setQueryType(AzureCognitiveSearchQueryType.VECTOR_SIMPLE_HYBRID) .setInScope(true) .setTopNDocuments(2) .setEmbeddingDependency(new OnYourDataDeploymentNameVectorizationSource("text-embedding-ada-002")) .setFieldsMapping( new AzureCognitiveSearchIndexFieldMappingOptions() .setTitleField("HotelName") .setContentFields(Arrays.asList("Description")) ) ); String question = "Find out the top hotel in town."; List<ChatRequestMessage> chatMessages = new ArrayList<>(); chatMessages.add(new ChatRequestUserMessage(question)); ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(chatMessages) .setDataSources(Arrays.asList(cognitiveSearchConfiguration)); ChatCompletions chatCompletions = client.getChatCompletions(deploymentOrModelId, chatCompletionsOptions); System.out.println("Question: " + question); for (ChatChoice choice : chatCompletions.getChoices()) { ChatResponseMessage message = choice.getMessage(); System.out.printf("Answer: %s%n%n", message.getContent()); List<ChatResponseMessage> contextMessages = message.getContext().getMessages(); for (ChatResponseMessage contextMessage : contextMessages) { System.out.println("Context 
Message: "); System.out.println(" - " + contextMessage.getContent()); } } }
String azureSearchIndexName = "{azure-cognitive-search-index-name}";
public static void main(String[] args) { String azureOpenaiKey = "{azure-open-ai-key}"; String endpoint = "{azure-open-ai-endpoint}"; String deploymentOrModelId = "{azure-open-ai-deployment-model-id}"; OpenAIClient client = new OpenAIClientBuilder() .endpoint(endpoint) .credential(new AzureKeyCredential(azureOpenaiKey)) .buildClient(); String azureSearchEndpoint = "{azure-search-endpoint}"; String azureSearchAdminKey = "{azure-search-key}"; String azureSearchIndexName = "{azure-search-index-name}"; AzureCognitiveSearchChatExtensionConfiguration searchConfiguration = new AzureCognitiveSearchChatExtensionConfiguration( new AzureCognitiveSearchChatExtensionParameters(azureSearchEndpoint, azureSearchIndexName) .setAuthentication(new OnYourDataApiKeyAuthenticationOptions(azureSearchAdminKey)) .setQueryType(AzureCognitiveSearchQueryType.VECTOR_SIMPLE_HYBRID) .setInScope(true) .setTopNDocuments(2) .setEmbeddingDependency(new OnYourDataDeploymentNameVectorizationSource("text-embedding-ada-002")) .setFieldsMapping( new AzureCognitiveSearchIndexFieldMappingOptions() .setTitleField("HotelName") .setContentFields(Arrays.asList("Description")) ) ); String question = "Find out the top hotel in town."; List<ChatRequestMessage> chatMessages = new ArrayList<>(); chatMessages.add(new ChatRequestUserMessage(question)); ChatCompletionsOptions chatCompletionsOptions = new ChatCompletionsOptions(chatMessages) .setDataSources(Arrays.asList(searchConfiguration)); ChatCompletions chatCompletions = client.getChatCompletions(deploymentOrModelId, chatCompletionsOptions); System.out.println("Question: " + question); for (ChatChoice choice : chatCompletions.getChoices()) { ChatResponseMessage message = choice.getMessage(); System.out.printf("Answer: %s%n%n", message.getContent()); List<ChatResponseMessage> contextMessages = message.getContext().getMessages(); for (ChatResponseMessage contextMessage : contextMessages) { System.out.println("Context Message: "); System.out.println(" - " + 
contextMessage.getContent()); } } }
class ChatCompletionsWithYourData { /** * Runs the sample and demonstrates configuration of Azure Cognitive Search as a data source. * * @param args Unused. Arguments to the program. */ }
class ChatCompletionsWithYourData { /** * Runs the sample and demonstrates configuration of Azure AI Search as a data source. * * @param args Unused. Arguments to the program. */ }
line 64 and line 67 both are trying to set the `itemBodyOverride`, what is the use case for `itemBodyOverride`
private CosmosItemResponse(ResourceResponse<Document> response, T item, JsonNode itemBodyOverride, Class<T> classType, ItemDeserializer itemDeserializer) { this.itemClassType = classType; this.resourceResponse = response; this.itemDeserializer = itemDeserializer; this.item = item; this.itemBodyOverride = itemBodyOverride; boolean hasPayloadStaticValue = item != null; this.hasPayload = () -> hasPayloadStaticValue; this.itemBodyOverride = null; }
this.itemBodyOverride = null;
private CosmosItemResponse(ResourceResponse<Document> response, T item, JsonNode itemBodyOverride, Class<T> classType, ItemDeserializer itemDeserializer) { this.itemClassType = classType; this.resourceResponse = response; this.itemDeserializer = itemDeserializer; this.item = item; this.itemBodyOverride = itemBodyOverride; boolean hasPayloadStaticValue = item != null; this.hasPayload = () -> hasPayloadStaticValue; }
class CosmosItemResponse<T> { private final Class<T> itemClassType; private final ItemDeserializer itemDeserializer; private volatile T item; private volatile JsonNode itemBodyOverride; final ResourceResponse<Document> resourceResponse; private InternalObjectNode props; private final AtomicBoolean hasTrackingIdCalculated = new AtomicBoolean(false); private boolean hasTrackingId; private final Supplier<Boolean> hasPayload; CosmosItemResponse(ResourceResponse<Document> response, Class<T> classType, ItemDeserializer itemDeserializer) { this.itemClassType = classType; this.resourceResponse = response; this.itemDeserializer = itemDeserializer; this.item = null; this.hasPayload = () -> response.hasPayload(); this.itemBodyOverride = null; } /** * Gets the resource. * * @return the resource */ @SuppressWarnings("unchecked") private byte[] getItemAsByteArray() { if (item != null && this.itemClassType == Utils.byteArrayClass) { return (byte[])item; } JsonNode effectiveJson = this.itemBodyOverride != null ? this.itemBodyOverride : this.resourceResponse.getBody(); if (effectiveJson == null) { return null; } return effectiveJson.toString().getBytes(StandardCharsets.UTF_8); } /** * Gets the resource. 
* * @return the resource */ @SuppressWarnings("unchecked") public T getItem() { if (item != null) { return item; } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics()); if (item == null) { synchronized (this) { if (item == null && hasPayload.get()) { if (this.itemClassType == Utils.byteArrayClass) { Instant serializationStartTime = Instant.now(); JsonNode json = this.resourceResponse.getBody(); item = (T) json.toString().getBytes(StandardCharsets.UTF_8); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else if (this.itemClassType == String.class) { Instant serializationStartTime = Instant.now(); JsonNode json = this.resourceResponse.getBody(); item = (T) json.toString(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else if (this.itemClassType == InternalObjectNode.class) { Instant serializationStartTime = Instant.now(); item = (T) getProperties(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); 
serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else { Instant serializationStartTime = Instant.now(); item = this.resourceResponse.getBody(this.itemClassType); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); } return item; } } } return item; } /** * Gets the itemProperties * * @return the itemProperties */ InternalObjectNode getProperties() { ensureInternalObjectNodeInitialized(); return props; } int getResponsePayloadLength() { return this.resourceResponse.getResponsePayloadLength(); } private void ensureInternalObjectNodeInitialized() { synchronized (this) { if (!this.resourceResponse.hasPayload()) { props = null; } else { props = new InternalObjectNode((ObjectNode)this.resourceResponse.getBody()); } } } /** * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master * resources). * * @return the max resource quota. */ public String getMaxResourceQuota() { return resourceResponse.getMaxResourceQuota(); } /** * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources) * * @return the current resource quota usage. */ public String getCurrentResourceQuotaUsage() { return resourceResponse.getCurrentResourceQuotaUsage(); } /** * Gets the Activity ID for the request. * * @return the activity getId. */ public String getActivityId() { return resourceResponse.getActivityId(); } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { return resourceResponse.getRequestCharge(); } /** * Gets the HTTP status code associated with the response. * * @return the status code. */ public int getStatusCode() { return resourceResponse.getStatusCode(); } /** * Gets the token used for managing client's consistency requirements. * * @return the session token. */ public String getSessionToken() { return resourceResponse.getSessionToken(); } /** * Gets the headers associated with the response. * * @return the response headers. */ public Map<String, String> getResponseHeaders() { return resourceResponse.getResponseHeaders(); } /** * Gets the diagnostics information for the current request to Azure Cosmos DB service. * * @return diagnostics information for the current request to Azure Cosmos DB service. */ public CosmosDiagnostics getDiagnostics() { return resourceResponse.getDiagnostics(); } /** * Gets the end-to-end request latency for the current request to Azure Cosmos DB service. * * @return end-to-end request latency for the current request to Azure Cosmos DB service. */ public Duration getDuration() { return resourceResponse.getDuration(); } /** * Gets the ETag from the response headers. * This is only relevant when getting response from the server. * * Null in case of delete operation. 
* * @return ETag */ public String getETag() { return resourceResponse.getETag(); } CosmosItemResponse<T> withRemappedStatusCode( int statusCode, double additionalRequestCharge, boolean isContentResponseOnWriteEnabled) { ResourceResponse<Document> mappedResourceResponse = this.resourceResponse.withRemappedStatusCode(statusCode, additionalRequestCharge); T payload = null; JsonNode itemBodyOverride = null; if (isContentResponseOnWriteEnabled) { payload = this.getItem(); itemBodyOverride = this.itemBodyOverride; } return new CosmosItemResponse<>( mappedResourceResponse, payload, itemBodyOverride, this.itemClassType, this.itemDeserializer); } boolean hasTrackingId(String candidate) { if (this.hasTrackingIdCalculated.compareAndSet(false, true)) { SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics()); Instant serializationStartTime = Instant.now(); InternalObjectNode itemNode = getProperties(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return this.hasTrackingId = (itemNode != null && candidate.equals(itemNode.get(Constants.Properties.TRACKING_ID))); } else { return this.hasTrackingId; } } static void initialize() { ImplementationBridgeHelpers.CosmosItemResponseHelper.setCosmosItemResponseBuilderAccessor( new ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor() { public <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosItemResponse<byte[]> response, Class<T> classType, ItemDeserializer itemDeserializer) { return new CosmosItemResponse<>( response.resourceResponse, Utils.parse(response.getItemAsByteArray(), classType), 
response.itemBodyOverride, classType, itemDeserializer); } @Override public <T> CosmosItemResponse<T> withRemappedStatusCode(CosmosItemResponse<T> originalResponse, int newStatusCode, double additionalRequestCharge, boolean isContentResponseOnWriteEnabled) { return originalResponse .withRemappedStatusCode(newStatusCode, additionalRequestCharge, isContentResponseOnWriteEnabled); } public byte[] getByteArrayContent(CosmosItemResponse<byte[]> response) { return response.getItemAsByteArray(); } public void setByteArrayContent(CosmosItemResponse<byte[]> response, Pair<byte[], JsonNode> content) { response.item = content.getLeft(); response.itemBodyOverride = content.getRight(); } public ResourceResponse<Document> getResourceResponse(CosmosItemResponse<byte[]> response) { return response.resourceResponse; } @Override public boolean hasTrackingId(CosmosItemResponse<?> response, String candidate) { checkNotNull(response, "Argument 'response' must not be null."); checkNotNull(candidate, "Argument 'candidate' must not be null."); return response.hasTrackingId(candidate); } }); } static { initialize(); } }
class CosmosItemResponse<T> { private final Class<T> itemClassType; private final ItemDeserializer itemDeserializer; private volatile T item; private volatile JsonNode itemBodyOverride; final ResourceResponse<Document> resourceResponse; private InternalObjectNode props; private final AtomicBoolean hasTrackingIdCalculated = new AtomicBoolean(false); private boolean hasTrackingId; private final Supplier<Boolean> hasPayload; CosmosItemResponse(ResourceResponse<Document> response, Class<T> classType, ItemDeserializer itemDeserializer) { this.itemClassType = classType; this.resourceResponse = response; this.itemDeserializer = itemDeserializer; this.item = null; this.hasPayload = () -> response.hasPayload(); this.itemBodyOverride = null; } /** * Gets the resource. * * @return the resource */ @SuppressWarnings("unchecked") private byte[] getItemAsByteArray() { if (item != null && this.itemClassType == Utils.byteArrayClass) { return (byte[])item; } JsonNode effectiveJson = this.itemBodyOverride != null ? this.itemBodyOverride : this.resourceResponse.getBody(); if (effectiveJson == null) { return null; } return effectiveJson.toString().getBytes(StandardCharsets.UTF_8); } /** * Gets the resource. 
* * @return the resource */ @SuppressWarnings("unchecked") public T getItem() { if (item != null) { return item; } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics()); if (item == null) { synchronized (this) { if (item == null && hasPayload.get()) { if (this.itemClassType == Utils.byteArrayClass) { Instant serializationStartTime = Instant.now(); JsonNode json = this.resourceResponse.getBody(); item = (T) json.toString().getBytes(StandardCharsets.UTF_8); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else if (this.itemClassType == String.class) { Instant serializationStartTime = Instant.now(); JsonNode json = this.resourceResponse.getBody(); item = (T) json.toString(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else if (this.itemClassType == InternalObjectNode.class) { Instant serializationStartTime = Instant.now(); item = (T) getProperties(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); 
serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else { Instant serializationStartTime = Instant.now(); item = this.resourceResponse.getBody(this.itemClassType); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); } return item; } } } return item; } /** * Gets the itemProperties * * @return the itemProperties */ InternalObjectNode getProperties() { ensureInternalObjectNodeInitialized(); return props; } int getResponsePayloadLength() { return this.resourceResponse.getResponsePayloadLength(); } private void ensureInternalObjectNodeInitialized() { synchronized (this) { if (!this.resourceResponse.hasPayload()) { props = null; } else { props = new InternalObjectNode((ObjectNode)this.resourceResponse.getBody()); } } } /** * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master * resources). * * @return the max resource quota. */ public String getMaxResourceQuota() { return resourceResponse.getMaxResourceQuota(); } /** * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources) * * @return the current resource quota usage. */ public String getCurrentResourceQuotaUsage() { return resourceResponse.getCurrentResourceQuotaUsage(); } /** * Gets the Activity ID for the request. * * @return the activity getId. */ public String getActivityId() { return resourceResponse.getActivityId(); } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { return resourceResponse.getRequestCharge(); } /** * Gets the HTTP status code associated with the response. * * @return the status code. */ public int getStatusCode() { return resourceResponse.getStatusCode(); } /** * Gets the token used for managing client's consistency requirements. * * @return the session token. */ public String getSessionToken() { return resourceResponse.getSessionToken(); } /** * Gets the headers associated with the response. * * @return the response headers. */ public Map<String, String> getResponseHeaders() { return resourceResponse.getResponseHeaders(); } /** * Gets the diagnostics information for the current request to Azure Cosmos DB service. * * @return diagnostics information for the current request to Azure Cosmos DB service. */ public CosmosDiagnostics getDiagnostics() { return resourceResponse.getDiagnostics(); } /** * Gets the end-to-end request latency for the current request to Azure Cosmos DB service. * * @return end-to-end request latency for the current request to Azure Cosmos DB service. */ public Duration getDuration() { return resourceResponse.getDuration(); } /** * Gets the ETag from the response headers. * This is only relevant when getting response from the server. * * Null in case of delete operation. 
* * @return ETag */ public String getETag() { return resourceResponse.getETag(); } CosmosItemResponse<T> withRemappedStatusCode( int statusCode, double additionalRequestCharge, boolean isContentResponseOnWriteEnabled) { ResourceResponse<Document> mappedResourceResponse = this.resourceResponse.withRemappedStatusCode(statusCode, additionalRequestCharge); T payload = null; JsonNode itemBodyOverride = null; if (isContentResponseOnWriteEnabled) { payload = this.getItem(); itemBodyOverride = this.itemBodyOverride; } return new CosmosItemResponse<>( mappedResourceResponse, payload, itemBodyOverride, this.itemClassType, this.itemDeserializer); } boolean hasTrackingId(String candidate) { if (this.hasTrackingIdCalculated.compareAndSet(false, true)) { SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics()); Instant serializationStartTime = Instant.now(); InternalObjectNode itemNode = getProperties(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return this.hasTrackingId = (itemNode != null && candidate.equals(itemNode.get(Constants.Properties.TRACKING_ID))); } else { return this.hasTrackingId; } } static void initialize() { ImplementationBridgeHelpers.CosmosItemResponseHelper.setCosmosItemResponseBuilderAccessor( new ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor() { public <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosItemResponse<byte[]> response, Class<T> classType, ItemDeserializer itemDeserializer) { return new CosmosItemResponse<>( response.resourceResponse, Utils.parse(response.getItemAsByteArray(), classType), 
response.itemBodyOverride, classType, itemDeserializer); } @Override public <T> CosmosItemResponse<T> withRemappedStatusCode(CosmosItemResponse<T> originalResponse, int newStatusCode, double additionalRequestCharge, boolean isContentResponseOnWriteEnabled) { return originalResponse .withRemappedStatusCode(newStatusCode, additionalRequestCharge, isContentResponseOnWriteEnabled); } public byte[] getByteArrayContent(CosmosItemResponse<byte[]> response) { return response.getItemAsByteArray(); } public void setByteArrayContent(CosmosItemResponse<byte[]> response, Pair<byte[], JsonNode> content) { response.item = content.getLeft(); response.itemBodyOverride = content.getRight(); } public ResourceResponse<Document> getResourceResponse(CosmosItemResponse<byte[]> response) { return response.resourceResponse; } @Override public boolean hasTrackingId(CosmosItemResponse<?> response, String candidate) { checkNotNull(response, "Argument 'response' must not be null."); checkNotNull(candidate, "Argument 'candidate' must not be null."); return response.hasTrackingId(candidate); } }); } static { initialize(); } }
It is used by the Encryption package: when the JSON body is modified during decryption/encryption, the modified `JsonNode` needs to be kept on the response.
/**
 * Creates a response wrapper around an already-materialized item.
 *
 * @param response the underlying service response
 * @param item the deserialized payload, or {@code null} when there is none
 * @param itemBodyOverride replacement JSON body to use instead of the wire body,
 *     kept so a document modified during decryption/encryption is preserved
 * @param classType the item type
 * @param itemDeserializer the deserializer used for lazy materialization
 */
private CosmosItemResponse(ResourceResponse<Document> response, T item, JsonNode itemBodyOverride, Class<T> classType, ItemDeserializer itemDeserializer) {
    this.itemClassType = classType;
    this.resourceResponse = response;
    this.itemDeserializer = itemDeserializer;
    this.item = item;
    this.itemBodyOverride = itemBodyOverride;
    // The payload is considered present exactly when an item was supplied.
    boolean hasPayloadStaticValue = item != null;
    this.hasPayload = () -> hasPayloadStaticValue;
    // FIX: removed the trailing "this.itemBodyOverride = null;" — it
    // unconditionally reset the field and discarded the override assigned
    // from the parameter just above.
}
this.itemBodyOverride = null;
/**
 * Creates a response wrapper around an already-materialized item, optionally
 * carrying a replacement JSON body ({@code itemBodyOverride}) that takes
 * precedence over the wire body.
 */
private CosmosItemResponse(ResourceResponse<Document> response, T item, JsonNode itemBodyOverride, Class<T> classType, ItemDeserializer itemDeserializer) {
    this.resourceResponse = response;
    this.itemClassType = classType;
    this.itemDeserializer = itemDeserializer;
    this.itemBodyOverride = itemBodyOverride;
    this.item = item;
    // A payload exists exactly when an item was handed in; evaluate once and
    // expose it through a constant supplier.
    final boolean payloadPresent = (item != null);
    this.hasPayload = () -> payloadPresent;
}
class CosmosItemResponse<T> { private final Class<T> itemClassType; private final ItemDeserializer itemDeserializer; private volatile T item; private volatile JsonNode itemBodyOverride; final ResourceResponse<Document> resourceResponse; private InternalObjectNode props; private final AtomicBoolean hasTrackingIdCalculated = new AtomicBoolean(false); private boolean hasTrackingId; private final Supplier<Boolean> hasPayload; CosmosItemResponse(ResourceResponse<Document> response, Class<T> classType, ItemDeserializer itemDeserializer) { this.itemClassType = classType; this.resourceResponse = response; this.itemDeserializer = itemDeserializer; this.item = null; this.hasPayload = () -> response.hasPayload(); this.itemBodyOverride = null; } /** * Gets the resource. * * @return the resource */ @SuppressWarnings("unchecked") private byte[] getItemAsByteArray() { if (item != null && this.itemClassType == Utils.byteArrayClass) { return (byte[])item; } JsonNode effectiveJson = this.itemBodyOverride != null ? this.itemBodyOverride : this.resourceResponse.getBody(); if (effectiveJson == null) { return null; } return effectiveJson.toString().getBytes(StandardCharsets.UTF_8); } /** * Gets the resource. 
* * @return the resource */ @SuppressWarnings("unchecked") public T getItem() { if (item != null) { return item; } SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics()); if (item == null) { synchronized (this) { if (item == null && hasPayload.get()) { if (this.itemClassType == Utils.byteArrayClass) { Instant serializationStartTime = Instant.now(); JsonNode json = this.resourceResponse.getBody(); item = (T) json.toString().getBytes(StandardCharsets.UTF_8); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else if (this.itemClassType == String.class) { Instant serializationStartTime = Instant.now(); JsonNode json = this.resourceResponse.getBody(); item = (T) json.toString(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else if (this.itemClassType == InternalObjectNode.class) { Instant serializationStartTime = Instant.now(); item = (T) getProperties(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); 
serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return item; } else { Instant serializationStartTime = Instant.now(); item = this.resourceResponse.getBody(this.itemClassType); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); } return item; } } } return item; } /** * Gets the itemProperties * * @return the itemProperties */ InternalObjectNode getProperties() { ensureInternalObjectNodeInitialized(); return props; } int getResponsePayloadLength() { return this.resourceResponse.getResponsePayloadLength(); } private void ensureInternalObjectNodeInitialized() { synchronized (this) { if (!this.resourceResponse.hasPayload()) { props = null; } else { props = new InternalObjectNode((ObjectNode)this.resourceResponse.getBody()); } } } /** * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master * resources). * * @return the max resource quota. */ public String getMaxResourceQuota() { return resourceResponse.getMaxResourceQuota(); } /** * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources) * * @return the current resource quota usage. */ public String getCurrentResourceQuotaUsage() { return resourceResponse.getCurrentResourceQuotaUsage(); } /** * Gets the Activity ID for the request. * * @return the activity getId. */ public String getActivityId() { return resourceResponse.getActivityId(); } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { return resourceResponse.getRequestCharge(); } /** * Gets the HTTP status code associated with the response. * * @return the status code. */ public int getStatusCode() { return resourceResponse.getStatusCode(); } /** * Gets the token used for managing client's consistency requirements. * * @return the session token. */ public String getSessionToken() { return resourceResponse.getSessionToken(); } /** * Gets the headers associated with the response. * * @return the response headers. */ public Map<String, String> getResponseHeaders() { return resourceResponse.getResponseHeaders(); } /** * Gets the diagnostics information for the current request to Azure Cosmos DB service. * * @return diagnostics information for the current request to Azure Cosmos DB service. */ public CosmosDiagnostics getDiagnostics() { return resourceResponse.getDiagnostics(); } /** * Gets the end-to-end request latency for the current request to Azure Cosmos DB service. * * @return end-to-end request latency for the current request to Azure Cosmos DB service. */ public Duration getDuration() { return resourceResponse.getDuration(); } /** * Gets the ETag from the response headers. * This is only relevant when getting response from the server. * * Null in case of delete operation. 
* * @return ETag */ public String getETag() { return resourceResponse.getETag(); } CosmosItemResponse<T> withRemappedStatusCode( int statusCode, double additionalRequestCharge, boolean isContentResponseOnWriteEnabled) { ResourceResponse<Document> mappedResourceResponse = this.resourceResponse.withRemappedStatusCode(statusCode, additionalRequestCharge); T payload = null; JsonNode itemBodyOverride = null; if (isContentResponseOnWriteEnabled) { payload = this.getItem(); itemBodyOverride = this.itemBodyOverride; } return new CosmosItemResponse<>( mappedResourceResponse, payload, itemBodyOverride, this.itemClassType, this.itemDeserializer); } boolean hasTrackingId(String candidate) { if (this.hasTrackingIdCalculated.compareAndSet(false, true)) { SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics()); Instant serializationStartTime = Instant.now(); InternalObjectNode itemNode = getProperties(); Instant serializationEndTime = Instant.now(); SerializationDiagnosticsContext.SerializationDiagnostics diagnostics = new SerializationDiagnosticsContext.SerializationDiagnostics( serializationStartTime, serializationEndTime, SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION ); serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics); return this.hasTrackingId = (itemNode != null && candidate.equals(itemNode.get(Constants.Properties.TRACKING_ID))); } else { return this.hasTrackingId; } } static void initialize() { ImplementationBridgeHelpers.CosmosItemResponseHelper.setCosmosItemResponseBuilderAccessor( new ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor() { public <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosItemResponse<byte[]> response, Class<T> classType, ItemDeserializer itemDeserializer) { return new CosmosItemResponse<>( response.resourceResponse, Utils.parse(response.getItemAsByteArray(), classType), 
response.itemBodyOverride, classType, itemDeserializer); } @Override public <T> CosmosItemResponse<T> withRemappedStatusCode(CosmosItemResponse<T> originalResponse, int newStatusCode, double additionalRequestCharge, boolean isContentResponseOnWriteEnabled) { return originalResponse .withRemappedStatusCode(newStatusCode, additionalRequestCharge, isContentResponseOnWriteEnabled); } public byte[] getByteArrayContent(CosmosItemResponse<byte[]> response) { return response.getItemAsByteArray(); } public void setByteArrayContent(CosmosItemResponse<byte[]> response, Pair<byte[], JsonNode> content) { response.item = content.getLeft(); response.itemBodyOverride = content.getRight(); } public ResourceResponse<Document> getResourceResponse(CosmosItemResponse<byte[]> response) { return response.resourceResponse; } @Override public boolean hasTrackingId(CosmosItemResponse<?> response, String candidate) { checkNotNull(response, "Argument 'response' must not be null."); checkNotNull(candidate, "Argument 'candidate' must not be null."); return response.hasTrackingId(candidate); } }); } static { initialize(); } }
/**
 * Response of an item-level operation against the Azure Cosmos DB service, wrapping the raw
 * {@link ResourceResponse} and lazily deserializing its payload into {@code T}.
 *
 * NOTE(review): the payload is materialized at most once via double-checked locking on the
 * volatile {@link #item} field; see {@link #getItem()}.
 */
class CosmosItemResponse<T> {
    // Target type the payload is deserialized into.
    private final Class<T> itemClassType;
    private final ItemDeserializer itemDeserializer;
    // Lazily materialized payload; volatile to support double-checked locking in getItem().
    private volatile T item;
    // When non-null, this JSON body takes precedence over resourceResponse.getBody() in
    // getItemAsByteArray() (set through the bridge accessor's setByteArrayContent).
    private volatile JsonNode itemBodyOverride;
    final ResourceResponse<Document> resourceResponse;
    // Cached InternalObjectNode view of the payload; populated by ensureInternalObjectNodeInitialized().
    private InternalObjectNode props;
    // Ensures the tracking-id comparison in hasTrackingId(String) is computed only once.
    private final AtomicBoolean hasTrackingIdCalculated = new AtomicBoolean(false);
    private boolean hasTrackingId;
    // Reports whether a payload is available without forcing deserialization.
    private final Supplier<Boolean> hasPayload;

    CosmosItemResponse(ResourceResponse<Document> response, Class<T> classType, ItemDeserializer itemDeserializer) {
        this.itemClassType = classType;
        this.resourceResponse = response;
        this.itemDeserializer = itemDeserializer;
        this.item = null;
        // Payload presence is delegated to the wrapped response for this overload.
        this.hasPayload = () -> response.hasPayload();
        this.itemBodyOverride = null;
    }

    /**
     * Gets the resource as its raw UTF-8 encoded JSON bytes.
     *
     * @return the resource bytes, or {@code null} when no body is available
     */
    @SuppressWarnings("unchecked")
    private byte[] getItemAsByteArray() {
        // Fast path: the payload was already materialized as a byte array.
        if (item != null && this.itemClassType == Utils.byteArrayClass) {
            return (byte[])item;
        }
        // The override body, when present, wins over the wire response body.
        JsonNode effectiveJson = this.itemBodyOverride != null ? this.itemBodyOverride : this.resourceResponse.getBody();
        if (effectiveJson == null) {
            return null;
        }
        return effectiveJson.toString().getBytes(StandardCharsets.UTF_8);
    }

    /**
     * Gets the resource, deserializing the response body into {@code T} on first access and
     * caching the result. Serialization timings are recorded in the request diagnostics.
     *
     * @return the resource
     */
    @SuppressWarnings("unchecked")
    public T getItem() {
        if (item != null) {
            return item;
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics());
        // Double-checked locking: re-test under the monitor before deserializing.
        if (item == null) {
            synchronized (this) {
                if (item == null && hasPayload.get()) {
                    if (this.itemClassType == Utils.byteArrayClass) {
                        // byte[] target: serialize the JSON body back to UTF-8 bytes.
                        Instant serializationStartTime = Instant.now();
                        JsonNode json = this.resourceResponse.getBody();
                        item = (T) json.toString().getBytes(StandardCharsets.UTF_8);
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else if (this.itemClassType == String.class) {
                        // String target: the raw JSON text of the body.
                        Instant serializationStartTime = Instant.now();
                        JsonNode json = this.resourceResponse.getBody();
                        item = (T) json.toString();
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else if (this.itemClassType == InternalObjectNode.class) {
                        // InternalObjectNode target: reuse the cached properties view.
                        Instant serializationStartTime = Instant.now();
                        item = (T) getProperties();
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else {
                        // POJO target: delegate deserialization to the resource response.
                        Instant serializationStartTime = Instant.now();
                        item = this.resourceResponse.getBody(this.itemClassType);
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                    }
                    return item;
                }
            }
        }
        return item;
    }

    /**
     * Gets the itemProperties.
     *
     * @return the itemProperties, or {@code null} when the response carries no payload
     */
    InternalObjectNode getProperties() {
        ensureInternalObjectNodeInitialized();
        return props;
    }

    int getResponsePayloadLength() {
        return this.resourceResponse.getResponsePayloadLength();
    }

    // NOTE(review): this recomputes props on every call even when already initialized —
    // presumably acceptable since callers are infrequent; confirm if it becomes hot.
    private void ensureInternalObjectNodeInitialized() {
        synchronized (this) {
            if (!this.resourceResponse.hasPayload()) {
                props = null;
            } else {
                props = new InternalObjectNode((ObjectNode)this.resourceResponse.getBody());
            }
        }
    }

    /**
     * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master
     * resources).
     *
     * @return the max resource quota.
     */
    public String getMaxResourceQuota() {
        return resourceResponse.getMaxResourceQuota();
    }

    /**
     * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources)
     *
     * @return the current resource quota usage.
     */
    public String getCurrentResourceQuotaUsage() {
        return resourceResponse.getCurrentResourceQuotaUsage();
    }

    /**
     * Gets the Activity ID for the request.
     *
     * @return the activity ID.
     */
    public String getActivityId() {
        return resourceResponse.getActivityId();
    }

    /**
     * Gets the request charge as request units (RU) consumed by the operation.
     * <p>
     * For more information about the RU and factors that can impact the effective charges please
     * visit the Azure Cosmos DB request units documentation.
     *
     * @return the request charge.
     */
    public double getRequestCharge() {
        return resourceResponse.getRequestCharge();
    }

    /**
     * Gets the HTTP status code associated with the response.
     *
     * @return the status code.
     */
    public int getStatusCode() {
        return resourceResponse.getStatusCode();
    }

    /**
     * Gets the token used for managing client's consistency requirements.
     *
     * @return the session token.
     */
    public String getSessionToken() {
        return resourceResponse.getSessionToken();
    }

    /**
     * Gets the headers associated with the response.
     *
     * @return the response headers.
     */
    public Map<String, String> getResponseHeaders() {
        return resourceResponse.getResponseHeaders();
    }

    /**
     * Gets the diagnostics information for the current request to Azure Cosmos DB service.
     *
     * @return diagnostics information for the current request to Azure Cosmos DB service.
     */
    public CosmosDiagnostics getDiagnostics() {
        return resourceResponse.getDiagnostics();
    }

    /**
     * Gets the end-to-end request latency for the current request to Azure Cosmos DB service.
     *
     * @return end-to-end request latency for the current request to Azure Cosmos DB service.
     */
    public Duration getDuration() {
        return resourceResponse.getDuration();
    }

    /**
     * Gets the ETag from the response headers.
     * This is only relevant when getting response from the server.
     *
     * Null in case of delete operation.
     *
     * @return ETag
     */
    public String getETag() {
        return resourceResponse.getETag();
    }

    /**
     * Creates a copy of this response with a remapped status code and an extra request charge.
     * The payload (and the body override) is carried over only when content-on-write is enabled.
     */
    CosmosItemResponse<T> withRemappedStatusCode(
        int statusCode,
        double additionalRequestCharge,
        boolean isContentResponseOnWriteEnabled) {

        ResourceResponse<Document> mappedResourceResponse =
            this.resourceResponse.withRemappedStatusCode(statusCode, additionalRequestCharge);
        T payload = null;
        JsonNode itemBodyOverride = null;
        if (isContentResponseOnWriteEnabled) {
            payload = this.getItem();
            itemBodyOverride = this.itemBodyOverride;
        }
        return new CosmosItemResponse<>(
            mappedResourceResponse, payload, itemBodyOverride, this.itemClassType, this.itemDeserializer);
    }

    /**
     * Checks whether the response payload carries the given tracking id. The (deserialization-backed)
     * comparison is performed once; later calls return the cached result.
     *
     * NOTE(review): candidate.equals(itemNode.get(...)) compares the String against whatever
     * get() returns — confirm it yields a String rather than a JsonNode.
     */
    boolean hasTrackingId(String candidate) {
        if (this.hasTrackingIdCalculated.compareAndSet(false, true)) {
            SerializationDiagnosticsContext serializationDiagnosticsContext =
                BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics());
            Instant serializationStartTime = Instant.now();
            InternalObjectNode itemNode = getProperties();
            Instant serializationEndTime = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                new SerializationDiagnosticsContext.SerializationDiagnostics(
                    serializationStartTime,
                    serializationEndTime,
                    SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                );
            serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
            return this.hasTrackingId =
                (itemNode != null && candidate.equals(itemNode.get(Constants.Properties.TRACKING_ID)));
        } else {
            return this.hasTrackingId;
        }
    }

    // Registers the bridge accessor that exposes package-private internals to sibling packages.
    static void initialize() {
        ImplementationBridgeHelpers.CosmosItemResponseHelper.setCosmosItemResponseBuilderAccessor(
            new ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor() {
                public <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosItemResponse<byte[]> response,
                                                                          Class<T> classType,
                                                                          ItemDeserializer itemDeserializer) {
                    return new CosmosItemResponse<>(
                        response.resourceResponse,
                        Utils.parse(response.getItemAsByteArray(), classType),
                        response.itemBodyOverride,
                        classType,
                        itemDeserializer);
                }

                @Override
                public <T> CosmosItemResponse<T> withRemappedStatusCode(CosmosItemResponse<T> originalResponse,
                                                                        int newStatusCode,
                                                                        double additionalRequestCharge,
                                                                        boolean isContentResponseOnWriteEnabled) {
                    return originalResponse
                        .withRemappedStatusCode(newStatusCode, additionalRequestCharge, isContentResponseOnWriteEnabled);
                }

                public byte[] getByteArrayContent(CosmosItemResponse<byte[]> response) {
                    return response.getItemAsByteArray();
                }

                public void setByteArrayContent(CosmosItemResponse<byte[]> response, Pair<byte[], JsonNode> content) {
                    response.item = content.getLeft();
                    response.itemBodyOverride = content.getRight();
                }

                public ResourceResponse<Document> getResourceResponse(CosmosItemResponse<byte[]> response) {
                    return response.resourceResponse;
                }

                @Override
                public boolean hasTrackingId(CosmosItemResponse<?> response, String candidate) {
                    checkNotNull(response, "Argument 'response' must not be null.");
                    checkNotNull(candidate, "Argument 'candidate' must not be null.");
                    return response.hasTrackingId(candidate);
                }
            });
    }

    static { initialize(); }
}
Fixed
/**
 * Creates a response that carries an already-materialized payload (used when remapping an
 * existing response), bypassing lazy deserialization.
 *
 * @param response         the underlying resource response to wrap.
 * @param item             the pre-deserialized payload, or {@code null} when content on write is disabled.
 * @param itemBodyOverride the JSON body that should take precedence over the wire body, or {@code null}.
 * @param classType        the target type of the payload.
 * @param itemDeserializer the deserializer used by the SDK for this payload type.
 */
private CosmosItemResponse(ResourceResponse<Document> response, T item, JsonNode itemBodyOverride, Class<T> classType, ItemDeserializer itemDeserializer) {
    this.itemClassType = classType;
    this.resourceResponse = response;
    this.itemDeserializer = itemDeserializer;
    this.item = item;
    this.itemBodyOverride = itemBodyOverride;
    // Payload presence is fixed at construction time for this overload; capture it in a
    // local so the supplier does not re-read mutable state.
    boolean hasPayloadStaticValue = item != null;
    this.hasPayload = () -> hasPayloadStaticValue;
    // BUG FIX: the original ended with `this.itemBodyOverride = null;`, which clobbered the
    // itemBodyOverride argument assigned above and silently dropped the body override.
}
this.itemBodyOverride = null;
/**
 * Creates a response that carries an already-materialized payload (used when remapping an
 * existing response), bypassing lazy deserialization.
 *
 * @param response         the underlying resource response to wrap.
 * @param item             the pre-deserialized payload, or {@code null} when content on write is disabled.
 * @param itemBodyOverride the JSON body that should take precedence over the wire body, or {@code null}.
 * @param classType        the target type of the payload.
 * @param itemDeserializer the deserializer used by the SDK for this payload type.
 */
private CosmosItemResponse(ResourceResponse<Document> response, T item, JsonNode itemBodyOverride, Class<T> classType, ItemDeserializer itemDeserializer) {
    this.itemClassType = classType;
    this.resourceResponse = response;
    this.itemDeserializer = itemDeserializer;
    this.item = item;
    this.itemBodyOverride = itemBodyOverride;
    // Payload presence is fixed at construction time for this overload; capture it in a
    // local so the supplier does not re-read mutable state.
    boolean hasPayloadStaticValue = item != null;
    this.hasPayload = () -> hasPayloadStaticValue;
}
/**
 * Response of an item-level operation against the Azure Cosmos DB service, wrapping the raw
 * {@link ResourceResponse} and lazily deserializing its payload into {@code T}.
 *
 * NOTE(review): the payload is materialized at most once via double-checked locking on the
 * volatile {@link #item} field; see {@link #getItem()}.
 */
class CosmosItemResponse<T> {
    // Target type the payload is deserialized into.
    private final Class<T> itemClassType;
    private final ItemDeserializer itemDeserializer;
    // Lazily materialized payload; volatile to support double-checked locking in getItem().
    private volatile T item;
    // When non-null, this JSON body takes precedence over resourceResponse.getBody() in
    // getItemAsByteArray() (set through the bridge accessor's setByteArrayContent).
    private volatile JsonNode itemBodyOverride;
    final ResourceResponse<Document> resourceResponse;
    // Cached InternalObjectNode view of the payload; populated by ensureInternalObjectNodeInitialized().
    private InternalObjectNode props;
    // Ensures the tracking-id comparison in hasTrackingId(String) is computed only once.
    private final AtomicBoolean hasTrackingIdCalculated = new AtomicBoolean(false);
    private boolean hasTrackingId;
    // Reports whether a payload is available without forcing deserialization.
    private final Supplier<Boolean> hasPayload;

    CosmosItemResponse(ResourceResponse<Document> response, Class<T> classType, ItemDeserializer itemDeserializer) {
        this.itemClassType = classType;
        this.resourceResponse = response;
        this.itemDeserializer = itemDeserializer;
        this.item = null;
        // Payload presence is delegated to the wrapped response for this overload.
        this.hasPayload = () -> response.hasPayload();
        this.itemBodyOverride = null;
    }

    /**
     * Gets the resource as its raw UTF-8 encoded JSON bytes.
     *
     * @return the resource bytes, or {@code null} when no body is available
     */
    @SuppressWarnings("unchecked")
    private byte[] getItemAsByteArray() {
        // Fast path: the payload was already materialized as a byte array.
        if (item != null && this.itemClassType == Utils.byteArrayClass) {
            return (byte[])item;
        }
        // The override body, when present, wins over the wire response body.
        JsonNode effectiveJson = this.itemBodyOverride != null ? this.itemBodyOverride : this.resourceResponse.getBody();
        if (effectiveJson == null) {
            return null;
        }
        return effectiveJson.toString().getBytes(StandardCharsets.UTF_8);
    }

    /**
     * Gets the resource, deserializing the response body into {@code T} on first access and
     * caching the result. Serialization timings are recorded in the request diagnostics.
     *
     * @return the resource
     */
    @SuppressWarnings("unchecked")
    public T getItem() {
        if (item != null) {
            return item;
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics());
        // Double-checked locking: re-test under the monitor before deserializing.
        if (item == null) {
            synchronized (this) {
                if (item == null && hasPayload.get()) {
                    if (this.itemClassType == Utils.byteArrayClass) {
                        // byte[] target: serialize the JSON body back to UTF-8 bytes.
                        Instant serializationStartTime = Instant.now();
                        JsonNode json = this.resourceResponse.getBody();
                        item = (T) json.toString().getBytes(StandardCharsets.UTF_8);
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else if (this.itemClassType == String.class) {
                        // String target: the raw JSON text of the body.
                        Instant serializationStartTime = Instant.now();
                        JsonNode json = this.resourceResponse.getBody();
                        item = (T) json.toString();
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else if (this.itemClassType == InternalObjectNode.class) {
                        // InternalObjectNode target: reuse the cached properties view.
                        Instant serializationStartTime = Instant.now();
                        item = (T) getProperties();
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else {
                        // POJO target: delegate deserialization to the resource response.
                        Instant serializationStartTime = Instant.now();
                        item = this.resourceResponse.getBody(this.itemClassType);
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                    }
                    return item;
                }
            }
        }
        return item;
    }

    /**
     * Gets the itemProperties.
     *
     * @return the itemProperties, or {@code null} when the response carries no payload
     */
    InternalObjectNode getProperties() {
        ensureInternalObjectNodeInitialized();
        return props;
    }

    int getResponsePayloadLength() {
        return this.resourceResponse.getResponsePayloadLength();
    }

    // NOTE(review): this recomputes props on every call even when already initialized —
    // presumably acceptable since callers are infrequent; confirm if it becomes hot.
    private void ensureInternalObjectNodeInitialized() {
        synchronized (this) {
            if (!this.resourceResponse.hasPayload()) {
                props = null;
            } else {
                props = new InternalObjectNode((ObjectNode)this.resourceResponse.getBody());
            }
        }
    }

    /**
     * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master
     * resources).
     *
     * @return the max resource quota.
     */
    public String getMaxResourceQuota() {
        return resourceResponse.getMaxResourceQuota();
    }

    /**
     * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources)
     *
     * @return the current resource quota usage.
     */
    public String getCurrentResourceQuotaUsage() {
        return resourceResponse.getCurrentResourceQuotaUsage();
    }

    /**
     * Gets the Activity ID for the request.
     *
     * @return the activity ID.
     */
    public String getActivityId() {
        return resourceResponse.getActivityId();
    }

    /**
     * Gets the request charge as request units (RU) consumed by the operation.
     * <p>
     * For more information about the RU and factors that can impact the effective charges please
     * visit the Azure Cosmos DB request units documentation.
     *
     * @return the request charge.
     */
    public double getRequestCharge() {
        return resourceResponse.getRequestCharge();
    }

    /**
     * Gets the HTTP status code associated with the response.
     *
     * @return the status code.
     */
    public int getStatusCode() {
        return resourceResponse.getStatusCode();
    }

    /**
     * Gets the token used for managing client's consistency requirements.
     *
     * @return the session token.
     */
    public String getSessionToken() {
        return resourceResponse.getSessionToken();
    }

    /**
     * Gets the headers associated with the response.
     *
     * @return the response headers.
     */
    public Map<String, String> getResponseHeaders() {
        return resourceResponse.getResponseHeaders();
    }

    /**
     * Gets the diagnostics information for the current request to Azure Cosmos DB service.
     *
     * @return diagnostics information for the current request to Azure Cosmos DB service.
     */
    public CosmosDiagnostics getDiagnostics() {
        return resourceResponse.getDiagnostics();
    }

    /**
     * Gets the end-to-end request latency for the current request to Azure Cosmos DB service.
     *
     * @return end-to-end request latency for the current request to Azure Cosmos DB service.
     */
    public Duration getDuration() {
        return resourceResponse.getDuration();
    }

    /**
     * Gets the ETag from the response headers.
     * This is only relevant when getting response from the server.
     *
     * Null in case of delete operation.
     *
     * @return ETag
     */
    public String getETag() {
        return resourceResponse.getETag();
    }

    /**
     * Creates a copy of this response with a remapped status code and an extra request charge.
     * The payload (and the body override) is carried over only when content-on-write is enabled.
     */
    CosmosItemResponse<T> withRemappedStatusCode(
        int statusCode,
        double additionalRequestCharge,
        boolean isContentResponseOnWriteEnabled) {

        ResourceResponse<Document> mappedResourceResponse =
            this.resourceResponse.withRemappedStatusCode(statusCode, additionalRequestCharge);
        T payload = null;
        JsonNode itemBodyOverride = null;
        if (isContentResponseOnWriteEnabled) {
            payload = this.getItem();
            itemBodyOverride = this.itemBodyOverride;
        }
        return new CosmosItemResponse<>(
            mappedResourceResponse, payload, itemBodyOverride, this.itemClassType, this.itemDeserializer);
    }

    /**
     * Checks whether the response payload carries the given tracking id. The (deserialization-backed)
     * comparison is performed once; later calls return the cached result.
     *
     * NOTE(review): candidate.equals(itemNode.get(...)) compares the String against whatever
     * get() returns — confirm it yields a String rather than a JsonNode.
     */
    boolean hasTrackingId(String candidate) {
        if (this.hasTrackingIdCalculated.compareAndSet(false, true)) {
            SerializationDiagnosticsContext serializationDiagnosticsContext =
                BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics());
            Instant serializationStartTime = Instant.now();
            InternalObjectNode itemNode = getProperties();
            Instant serializationEndTime = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                new SerializationDiagnosticsContext.SerializationDiagnostics(
                    serializationStartTime,
                    serializationEndTime,
                    SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                );
            serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
            return this.hasTrackingId =
                (itemNode != null && candidate.equals(itemNode.get(Constants.Properties.TRACKING_ID)));
        } else {
            return this.hasTrackingId;
        }
    }

    // Registers the bridge accessor that exposes package-private internals to sibling packages.
    static void initialize() {
        ImplementationBridgeHelpers.CosmosItemResponseHelper.setCosmosItemResponseBuilderAccessor(
            new ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor() {
                public <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosItemResponse<byte[]> response,
                                                                          Class<T> classType,
                                                                          ItemDeserializer itemDeserializer) {
                    return new CosmosItemResponse<>(
                        response.resourceResponse,
                        Utils.parse(response.getItemAsByteArray(), classType),
                        response.itemBodyOverride,
                        classType,
                        itemDeserializer);
                }

                @Override
                public <T> CosmosItemResponse<T> withRemappedStatusCode(CosmosItemResponse<T> originalResponse,
                                                                        int newStatusCode,
                                                                        double additionalRequestCharge,
                                                                        boolean isContentResponseOnWriteEnabled) {
                    return originalResponse
                        .withRemappedStatusCode(newStatusCode, additionalRequestCharge, isContentResponseOnWriteEnabled);
                }

                public byte[] getByteArrayContent(CosmosItemResponse<byte[]> response) {
                    return response.getItemAsByteArray();
                }

                public void setByteArrayContent(CosmosItemResponse<byte[]> response, Pair<byte[], JsonNode> content) {
                    response.item = content.getLeft();
                    response.itemBodyOverride = content.getRight();
                }

                public ResourceResponse<Document> getResourceResponse(CosmosItemResponse<byte[]> response) {
                    return response.resourceResponse;
                }

                @Override
                public boolean hasTrackingId(CosmosItemResponse<?> response, String candidate) {
                    checkNotNull(response, "Argument 'response' must not be null.");
                    checkNotNull(candidate, "Argument 'candidate' must not be null.");
                    return response.hasTrackingId(candidate);
                }
            });
    }

    static { initialize(); }
}
/**
 * Response of an item-level operation against the Azure Cosmos DB service, wrapping the raw
 * {@link ResourceResponse} and lazily deserializing its payload into {@code T}.
 *
 * NOTE(review): the payload is materialized at most once via double-checked locking on the
 * volatile {@link #item} field; see {@link #getItem()}.
 */
class CosmosItemResponse<T> {
    // Target type the payload is deserialized into.
    private final Class<T> itemClassType;
    private final ItemDeserializer itemDeserializer;
    // Lazily materialized payload; volatile to support double-checked locking in getItem().
    private volatile T item;
    // When non-null, this JSON body takes precedence over resourceResponse.getBody() in
    // getItemAsByteArray() (set through the bridge accessor's setByteArrayContent).
    private volatile JsonNode itemBodyOverride;
    final ResourceResponse<Document> resourceResponse;
    // Cached InternalObjectNode view of the payload; populated by ensureInternalObjectNodeInitialized().
    private InternalObjectNode props;
    // Ensures the tracking-id comparison in hasTrackingId(String) is computed only once.
    private final AtomicBoolean hasTrackingIdCalculated = new AtomicBoolean(false);
    private boolean hasTrackingId;
    // Reports whether a payload is available without forcing deserialization.
    private final Supplier<Boolean> hasPayload;

    CosmosItemResponse(ResourceResponse<Document> response, Class<T> classType, ItemDeserializer itemDeserializer) {
        this.itemClassType = classType;
        this.resourceResponse = response;
        this.itemDeserializer = itemDeserializer;
        this.item = null;
        // Payload presence is delegated to the wrapped response for this overload.
        this.hasPayload = () -> response.hasPayload();
        this.itemBodyOverride = null;
    }

    /**
     * Gets the resource as its raw UTF-8 encoded JSON bytes.
     *
     * @return the resource bytes, or {@code null} when no body is available
     */
    @SuppressWarnings("unchecked")
    private byte[] getItemAsByteArray() {
        // Fast path: the payload was already materialized as a byte array.
        if (item != null && this.itemClassType == Utils.byteArrayClass) {
            return (byte[])item;
        }
        // The override body, when present, wins over the wire response body.
        JsonNode effectiveJson = this.itemBodyOverride != null ? this.itemBodyOverride : this.resourceResponse.getBody();
        if (effectiveJson == null) {
            return null;
        }
        return effectiveJson.toString().getBytes(StandardCharsets.UTF_8);
    }

    /**
     * Gets the resource, deserializing the response body into {@code T} on first access and
     * caching the result. Serialization timings are recorded in the request diagnostics.
     *
     * @return the resource
     */
    @SuppressWarnings("unchecked")
    public T getItem() {
        if (item != null) {
            return item;
        }
        SerializationDiagnosticsContext serializationDiagnosticsContext = BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics());
        // Double-checked locking: re-test under the monitor before deserializing.
        if (item == null) {
            synchronized (this) {
                if (item == null && hasPayload.get()) {
                    if (this.itemClassType == Utils.byteArrayClass) {
                        // byte[] target: serialize the JSON body back to UTF-8 bytes.
                        Instant serializationStartTime = Instant.now();
                        JsonNode json = this.resourceResponse.getBody();
                        item = (T) json.toString().getBytes(StandardCharsets.UTF_8);
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else if (this.itemClassType == String.class) {
                        // String target: the raw JSON text of the body.
                        Instant serializationStartTime = Instant.now();
                        JsonNode json = this.resourceResponse.getBody();
                        item = (T) json.toString();
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else if (this.itemClassType == InternalObjectNode.class) {
                        // InternalObjectNode target: reuse the cached properties view.
                        Instant serializationStartTime = Instant.now();
                        item = (T) getProperties();
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                        return item;
                    } else {
                        // POJO target: delegate deserialization to the resource response.
                        Instant serializationStartTime = Instant.now();
                        item = this.resourceResponse.getBody(this.itemClassType);
                        Instant serializationEndTime = Instant.now();
                        SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                            new SerializationDiagnosticsContext.SerializationDiagnostics(
                                serializationStartTime,
                                serializationEndTime,
                                SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                            );
                        serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
                    }
                    return item;
                }
            }
        }
        return item;
    }

    /**
     * Gets the itemProperties.
     *
     * @return the itemProperties, or {@code null} when the response carries no payload
     */
    InternalObjectNode getProperties() {
        ensureInternalObjectNodeInitialized();
        return props;
    }

    int getResponsePayloadLength() {
        return this.resourceResponse.getResponsePayloadLength();
    }

    // NOTE(review): this recomputes props on every call even when already initialized —
    // presumably acceptable since callers are infrequent; confirm if it becomes hot.
    private void ensureInternalObjectNodeInitialized() {
        synchronized (this) {
            if (!this.resourceResponse.hasPayload()) {
                props = null;
            } else {
                props = new InternalObjectNode((ObjectNode)this.resourceResponse.getBody());
            }
        }
    }

    /**
     * Gets the maximum size limit for this entity (in megabytes (MB) for server resources and in count for master
     * resources).
     *
     * @return the max resource quota.
     */
    public String getMaxResourceQuota() {
        return resourceResponse.getMaxResourceQuota();
    }

    /**
     * Gets the current size of this entity (in megabytes (MB) for server resources and in count for master resources)
     *
     * @return the current resource quota usage.
     */
    public String getCurrentResourceQuotaUsage() {
        return resourceResponse.getCurrentResourceQuotaUsage();
    }

    /**
     * Gets the Activity ID for the request.
     *
     * @return the activity ID.
     */
    public String getActivityId() {
        return resourceResponse.getActivityId();
    }

    /**
     * Gets the request charge as request units (RU) consumed by the operation.
     * <p>
     * For more information about the RU and factors that can impact the effective charges please
     * visit the Azure Cosmos DB request units documentation.
     *
     * @return the request charge.
     */
    public double getRequestCharge() {
        return resourceResponse.getRequestCharge();
    }

    /**
     * Gets the HTTP status code associated with the response.
     *
     * @return the status code.
     */
    public int getStatusCode() {
        return resourceResponse.getStatusCode();
    }

    /**
     * Gets the token used for managing client's consistency requirements.
     *
     * @return the session token.
     */
    public String getSessionToken() {
        return resourceResponse.getSessionToken();
    }

    /**
     * Gets the headers associated with the response.
     *
     * @return the response headers.
     */
    public Map<String, String> getResponseHeaders() {
        return resourceResponse.getResponseHeaders();
    }

    /**
     * Gets the diagnostics information for the current request to Azure Cosmos DB service.
     *
     * @return diagnostics information for the current request to Azure Cosmos DB service.
     */
    public CosmosDiagnostics getDiagnostics() {
        return resourceResponse.getDiagnostics();
    }

    /**
     * Gets the end-to-end request latency for the current request to Azure Cosmos DB service.
     *
     * @return end-to-end request latency for the current request to Azure Cosmos DB service.
     */
    public Duration getDuration() {
        return resourceResponse.getDuration();
    }

    /**
     * Gets the ETag from the response headers.
     * This is only relevant when getting response from the server.
     *
     * Null in case of delete operation.
     *
     * @return ETag
     */
    public String getETag() {
        return resourceResponse.getETag();
    }

    /**
     * Creates a copy of this response with a remapped status code and an extra request charge.
     * The payload (and the body override) is carried over only when content-on-write is enabled.
     */
    CosmosItemResponse<T> withRemappedStatusCode(
        int statusCode,
        double additionalRequestCharge,
        boolean isContentResponseOnWriteEnabled) {

        ResourceResponse<Document> mappedResourceResponse =
            this.resourceResponse.withRemappedStatusCode(statusCode, additionalRequestCharge);
        T payload = null;
        JsonNode itemBodyOverride = null;
        if (isContentResponseOnWriteEnabled) {
            payload = this.getItem();
            itemBodyOverride = this.itemBodyOverride;
        }
        return new CosmosItemResponse<>(
            mappedResourceResponse, payload, itemBodyOverride, this.itemClassType, this.itemDeserializer);
    }

    /**
     * Checks whether the response payload carries the given tracking id. The (deserialization-backed)
     * comparison is performed once; later calls return the cached result.
     *
     * NOTE(review): candidate.equals(itemNode.get(...)) compares the String against whatever
     * get() returns — confirm it yields a String rather than a JsonNode.
     */
    boolean hasTrackingId(String candidate) {
        if (this.hasTrackingIdCalculated.compareAndSet(false, true)) {
            SerializationDiagnosticsContext serializationDiagnosticsContext =
                BridgeInternal.getSerializationDiagnosticsContext(this.getDiagnostics());
            Instant serializationStartTime = Instant.now();
            InternalObjectNode itemNode = getProperties();
            Instant serializationEndTime = Instant.now();
            SerializationDiagnosticsContext.SerializationDiagnostics diagnostics =
                new SerializationDiagnosticsContext.SerializationDiagnostics(
                    serializationStartTime,
                    serializationEndTime,
                    SerializationDiagnosticsContext.SerializationType.ITEM_DESERIALIZATION
                );
            serializationDiagnosticsContext.addSerializationDiagnostics(diagnostics);
            return this.hasTrackingId =
                (itemNode != null && candidate.equals(itemNode.get(Constants.Properties.TRACKING_ID)));
        } else {
            return this.hasTrackingId;
        }
    }

    // Registers the bridge accessor that exposes package-private internals to sibling packages.
    static void initialize() {
        ImplementationBridgeHelpers.CosmosItemResponseHelper.setCosmosItemResponseBuilderAccessor(
            new ImplementationBridgeHelpers.CosmosItemResponseHelper.CosmosItemResponseBuilderAccessor() {
                public <T> CosmosItemResponse<T> createCosmosItemResponse(CosmosItemResponse<byte[]> response,
                                                                          Class<T> classType,
                                                                          ItemDeserializer itemDeserializer) {
                    return new CosmosItemResponse<>(
                        response.resourceResponse,
                        Utils.parse(response.getItemAsByteArray(), classType),
                        response.itemBodyOverride,
                        classType,
                        itemDeserializer);
                }

                @Override
                public <T> CosmosItemResponse<T> withRemappedStatusCode(CosmosItemResponse<T> originalResponse,
                                                                        int newStatusCode,
                                                                        double additionalRequestCharge,
                                                                        boolean isContentResponseOnWriteEnabled) {
                    return originalResponse
                        .withRemappedStatusCode(newStatusCode, additionalRequestCharge, isContentResponseOnWriteEnabled);
                }

                public byte[] getByteArrayContent(CosmosItemResponse<byte[]> response) {
                    return response.getItemAsByteArray();
                }

                public void setByteArrayContent(CosmosItemResponse<byte[]> response, Pair<byte[], JsonNode> content) {
                    response.item = content.getLeft();
                    response.itemBodyOverride = content.getRight();
                }

                public ResourceResponse<Document> getResourceResponse(CosmosItemResponse<byte[]> response) {
                    return response.resourceResponse;
                }

                @Override
                public boolean hasTrackingId(CosmosItemResponse<?> response, String candidate) {
                    checkNotNull(response, "Argument 'response' must not be null.");
                    checkNotNull(candidate, "Argument 'candidate' must not be null.");
                    return response.hasTrackingId(candidate);
                }
            });
    }

    static { initialize(); }
}
Removed via package-private access. The OpenAI TypeSpec added `filename` to the model. I am checking with Timothee whether we can adopt a design that "links" the real `file` field to the synthetic `filename` field. If we can do this, codegen will not need to generate this `fileFilename` accessor for `filename` (which is also poor naming).
/**
 * Sets the filename associated with the {@code file} audio content.
 * Package-private: kept out of the public API while the naming/design is under review.
 *
 * @param fileFilename the filename to associate with the audio content.
 * @return the AudioTranscriptionOptions object itself.
 */
AudioTranscriptionOptions setFileFilename(String fileFilename) {
    this.fileFilename = fileFilename;
    return this;
}
}
/**
 * Sets the filename associated with the {@code file} audio content.
 * Package-private: kept out of the public API while the naming/design is under review.
 *
 * @param fileFilename the filename to associate with the audio content.
 * @return the AudioTranscriptionOptions object itself.
 */
AudioTranscriptionOptions setFileFilename(String fileFilename) {
    this.fileFilename = fileFilename;
    return this;
}
class AudioTranscriptionOptions {
    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:
     * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and detail of the
     * result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language
     * code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written language of the
     * prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /**
     * Get the file property: The audio data to transcribe. This must be the binary content of a file in one of the
     * supported media formats:
     * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value.
     */
    public byte[] getFile() {
        // Defensive copy: never hand callers a reference to the internal mutable buffer.
        return this.file == null ? null : this.file.clone();
    }

    /**
     * Get the responseFormat property: The requested format of the transcription response data, which will influence
     * the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: The requested format of the transcription response data, which will influence
     * the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: The primary spoken language of the audio data to be transcribed, supplied as a
     * two-letter ISO-639-1 language code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: The primary spoken language of the audio data to be transcribed, supplied as a
     * two-letter ISO-639-1 language code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: An optional hint to guide the model's style or continue from a prior audio segment.
     * The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: An optional hint to guide the model's style or continue from a prior audio segment.
     * The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: The model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: The model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /*
     * The optional filename or descriptive identifier to associate with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /**
     * Get the filename property: The optional filename or descriptive identifier to associate with the audio
     * data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: The optional filename or descriptive identifier to associate with the audio
     * data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /*
     * The filename for file.
     * NOTE(review): this intentionally reuses the JSON name "file" (same as the byte[] field above) —
     * presumably consumed by the multipart serializer as the part name; confirm with codegen design.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        this.file = file;
    }

    /**
     * Get the fileFilename property: The filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    // NOTE(review): the package-private setFileFilename(String) setter is generated outside this
    // snippet; codegen left its Javadoc dangling here.
}
class AudioTranscriptionOptions {
    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:
     * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and detail of the
     * result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language
     * code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written language of the
     * prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /**
     * Get the file property: The audio data to transcribe. This must be the binary content of a file in one of the
     * supported media formats:
     * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value.
     */
    public byte[] getFile() {
        // Defensive copy so callers cannot mutate the internal buffer.
        return CoreUtils.clone(this.file);
    }

    /**
     * Get the responseFormat property: The requested format of the transcription response data, which will influence
     * the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: The requested format of the transcription response data, which will influence
     * the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: The primary spoken language of the audio data to be transcribed, supplied as a
     * two-letter ISO-639-1 language code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: The primary spoken language of the audio data to be transcribed, supplied as a
     * two-letter ISO-639-1 language code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: An optional hint to guide the model's style or continue from a prior audio segment.
     * The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: An optional hint to guide the model's style or continue from a prior audio segment.
     * The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: The model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: The model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /*
     * The optional filename or descriptive identifier to associate with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /**
     * Get the filename property: The optional filename or descriptive identifier to associate with the audio
     * data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: The optional filename or descriptive identifier to associate with the audio
     * data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /*
     * The filename for file.
     * NOTE(review): this intentionally reuses the JSON name "file" (same as the byte[] field above) —
     * presumably consumed by the multipart serializer as the part name; confirm with codegen design.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        this.file = file;
    }

    /**
     * Get the fileFilename property: The filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    // NOTE(review): the package-private setFileFilename(String) setter is generated outside this
    // snippet; codegen left its Javadoc dangling here.
}
Just FYI: as far as I know, this `filename` field is also used by the OpenAI web dashboard to reference files. We should sync with the SDKs for the other languages before introducing behaviour that makes an educated guess at the file name.
/**
 * Set the fileFilename property and return this options object for chaining.
 * Package-private on purpose — presumably only the client/serialization layer should
 * override the multipart part name; TODO confirm with the codegen design.
 *
 * @param fileFilename the fileFilename value to set.
 * @return the AudioTranscriptionOptions object itself.
 */
AudioTranscriptionOptions setFileFilename(String fileFilename) {
    this.fileFilename = fileFilename;
    return this;
}
}
/**
 * Set the fileFilename property and return this options object for chaining.
 * Package-private on purpose — presumably only the client/serialization layer should
 * override the multipart part name; TODO confirm with the codegen design.
 *
 * @param fileFilename the fileFilename value to set.
 * @return the AudioTranscriptionOptions object itself.
 */
AudioTranscriptionOptions setFileFilename(String fileFilename) {
    this.fileFilename = fileFilename;
    return this;
}
class AudioTranscriptionOptions {
    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:
     * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and detail of the
     * result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language
     * code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written language of the
     * prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /**
     * Get the file property: The audio data to transcribe. This must be the binary content of a file in one of the
     * supported media formats:
     * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value.
     */
    public byte[] getFile() {
        // Defensive copy: never hand callers a reference to the internal mutable buffer.
        return this.file == null ? null : this.file.clone();
    }

    /**
     * Get the responseFormat property: The requested format of the transcription response data, which will influence
     * the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: The requested format of the transcription response data, which will influence
     * the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: The primary spoken language of the audio data to be transcribed, supplied as a
     * two-letter ISO-639-1 language code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: The primary spoken language of the audio data to be transcribed, supplied as a
     * two-letter ISO-639-1 language code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: An optional hint to guide the model's style or continue from a prior audio segment.
     * The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: An optional hint to guide the model's style or continue from a prior audio segment.
     * The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: The model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: The model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /*
     * The optional filename or descriptive identifier to associate with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /**
     * Get the filename property: The optional filename or descriptive identifier to associate with the audio
     * data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: The optional filename or descriptive identifier to associate with the audio
     * data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /*
     * The filename for file.
     * NOTE(review): this intentionally reuses the JSON name "file" (same as the byte[] field above) —
     * presumably consumed by the multipart serializer as the part name; confirm with codegen design.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        this.file = file;
    }

    /**
     * Get the fileFilename property: The filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    // NOTE(review): the package-private setFileFilename(String) setter is generated outside this
    // snippet; codegen left its Javadoc dangling here.
}
class AudioTranscriptionOptions {
    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:
     * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and detail of the
     * result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language
     * code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written language of the
     * prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /**
     * Get the file property: The audio data to transcribe. This must be the binary content of a file in one of the
     * supported media formats:
     * flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value.
     */
    public byte[] getFile() {
        // Defensive copy so callers cannot mutate the internal buffer.
        return CoreUtils.clone(this.file);
    }

    /**
     * Get the responseFormat property: The requested format of the transcription response data, which will influence
     * the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: The requested format of the transcription response data, which will influence
     * the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: The primary spoken language of the audio data to be transcribed, supplied as a
     * two-letter ISO-639-1 language code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: The primary spoken language of the audio data to be transcribed, supplied as a
     * two-letter ISO-639-1 language code such as 'en' or 'fr'.
     * Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: An optional hint to guide the model's style or continue from a prior audio segment.
     * The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: An optional hint to guide the model's style or continue from a prior audio segment.
     * The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: The sampling temperature, between 0 and 1.
     * Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused
     * and deterministic.
     * If set to 0, the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: The model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: The model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /*
     * The optional filename or descriptive identifier to associate with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /**
     * Get the filename property: The optional filename or descriptive identifier to associate with the audio
     * data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: The optional filename or descriptive identifier to associate with the audio
     * data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /*
     * The filename for file.
     * NOTE(review): this intentionally reuses the JSON name "file" (same as the byte[] field above) —
     * presumably consumed by the multipart serializer as the part name; confirm with codegen design.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        this.file = file;
    }

    /**
     * Get the fileFilename property: The filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    // NOTE(review): the package-private setFileFilename(String) setter is generated outside this
    // snippet; codegen left its Javadoc dangling here.
}
We should revert this change to keep the defensive copy of `file`.
/**
 * Get the file property: the audio data to transcribe.
 *
 * @return a defensive copy of the file value, or null if no file was set.
 */
public byte[] getFile() {
    // Defensive copy: returning the internal array directly would let callers mutate it.
    return this.file == null ? null : this.file.clone();
}
return this.file;
/**
 * Get the file property: the audio data to transcribe.
 *
 * @return a defensive copy of the file value (CoreUtils.clone — presumably null-safe; confirm).
 */
public byte[] getFile() {
    // Defensive copy so callers cannot mutate the internal buffer.
    return CoreUtils.clone(this.file);
}
/**
 * Options for an audio transcription request: the raw audio bytes plus the optional response-format,
 * language, prompt, temperature, model and filename hints.
 */
class AudioTranscriptionOptions {

    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported
     * media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and
     * detail of the result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter
     * ISO-639-1 language code such as 'en' or 'fr'. Providing this known input language is optional
     * but may improve the accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written
     * language of the prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
     * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
     * the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /**
     * Get the file property: the audio data to transcribe. This must be the binary content of a file
     * in one of the supported media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value, or null if no audio data was set.
     */
    public byte[] getFile() {
        // Defensive copy: never expose the internal buffer to callers.
        return this.file == null ? null : this.file.clone();
    }

    /**
     * Get the responseFormat property: the requested format of the transcription response data, which
     * will influence the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: the requested format of the transcription response data, which
     * will influence the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: the primary spoken language of the audio data to be transcribed,
     * supplied as a two-letter ISO-639-1 language code such as 'en' or 'fr'.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: the primary spoken language of the audio data to be transcribed,
     * supplied as a two-letter ISO-639-1 language code such as 'en' or 'fr'. Providing this known
     * input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: an optional hint to guide the model's style or continue from a prior
     * audio segment.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: an optional hint to guide the model's style or continue from a prior
     * audio segment. The written language of the prompt should match the primary spoken language of
     * the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: the sampling temperature, between 0 and 1.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: the sampling temperature, between 0 and 1. Higher values like 0.8
     * will make the output more random, while lower values like 0.2 will make it more focused and
     * deterministic. If set to 0, the model will use log probability to automatically increase the
     * temperature until certain thresholds are hit.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: the model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: the model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /*
     * The optional filename or descriptive identifier to associate with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /**
     * Get the filename property: the optional filename or descriptive identifier to associate with
     * the audio data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: the optional filename or descriptive identifier to associate with
     * the audio data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /*
     * The filename for file. NOTE(review): deliberately mapped to the same "file" JSON property as
     * the audio bytes — presumably consumed for the multipart content-disposition filename; confirm
     * against the multipart serializer before changing.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        // NOTE(review): stored without copying; a caller-side mutation of the array would be visible
        // here — confirm whether a defensive copy is wanted on input as well.
        this.file = file;
    }

    /**
     * Get the fileFilename property: the filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    /**
     * Set the fileFilename property: the filename for file.
     *
     * @param fileFilename the fileFilename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    AudioTranscriptionOptions setFileFilename(String fileFilename) {
        this.fileFilename = fileFilename;
        return this;
    }
}
/**
 * Options for an audio transcription request: the raw audio bytes plus the optional response-format,
 * language, prompt, temperature, model and filename hints.
 */
class AudioTranscriptionOptions {

    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported
     * media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and
     * detail of the result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter
     * ISO-639-1 language code such as 'en' or 'fr'. Providing this known input language is optional
     * but may improve the accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written
     * language of the prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
     * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
     * the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /**
     * Get the file property: the audio data to transcribe. This must be the binary content of a file
     * in one of the supported media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value, or null if no audio data was set.
     */
    public byte[] getFile() {
        // Defensive copy: never expose the internal buffer to callers.
        return this.file == null ? null : this.file.clone();
    }

    /**
     * Get the responseFormat property: the requested format of the transcription response data, which
     * will influence the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: the requested format of the transcription response data, which
     * will influence the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: the primary spoken language of the audio data to be transcribed,
     * supplied as a two-letter ISO-639-1 language code such as 'en' or 'fr'.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: the primary spoken language of the audio data to be transcribed,
     * supplied as a two-letter ISO-639-1 language code such as 'en' or 'fr'. Providing this known
     * input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: an optional hint to guide the model's style or continue from a prior
     * audio segment.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: an optional hint to guide the model's style or continue from a prior
     * audio segment. The written language of the prompt should match the primary spoken language of
     * the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: the sampling temperature, between 0 and 1.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: the sampling temperature, between 0 and 1. Higher values like 0.8
     * will make the output more random, while lower values like 0.2 will make it more focused and
     * deterministic. If set to 0, the model will use log probability to automatically increase the
     * temperature until certain thresholds are hit.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: the model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: the model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /*
     * The optional filename or descriptive identifier to associate with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /**
     * Get the filename property: the optional filename or descriptive identifier to associate with
     * the audio data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: the optional filename or descriptive identifier to associate with
     * the audio data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /*
     * The filename for file. NOTE(review): deliberately mapped to the same "file" JSON property as
     * the audio bytes — presumably consumed for the multipart content-disposition filename; confirm
     * against the multipart serializer before changing.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        // NOTE(review): stored without copying; a caller-side mutation of the array would be visible
        // here — confirm whether a defensive copy is wanted on input as well.
        this.file = file;
    }

    /**
     * Get the fileFilename property: the filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    /**
     * Set the fileFilename property: the filename for file.
     *
     * @param fileFilename the fileFilename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    AudioTranscriptionOptions setFileFilename(String fileFilename) {
        this.fileFilename = fileFilename;
        return this;
    }
}
Yes, I am aware that the `filename` could be in use somewhere in the backend (that is what content-disposition is for). I've logged this in the typespec multipart design issue https://github.com/microsoft/typespec/issues/2704#issuecomment-1861951548 Basically, we would like a better solution in the future to indicate that this `filename` property is for content-disposition. And if there were such a definition in typespec, the emitter would no longer need to guess the name (which here results in the bad `fileFilename`) :-) --- https://github.com/Azure/azure-sdk-for-java/blob/55fb5b37807d8df75942fe31c2a99c9431b4399a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/MultipartDataHelper.java#L146-L167 `formatAudioTranslationOptions` does not add `filename`, so I assume it is only used in content-disposition: https://github.com/Azure/azure-sdk-for-java/blob/55fb5b37807d8df75942fe31c2a99c9431b4399a/sdk/openai/azure-ai-openai/src/main/java/com/azure/ai/openai/implementation/MultipartDataHelper.java#L118-L121
/**
 * Set the fileFilename property: the filename for file.
 *
 * @param fileFilename the fileFilename value to set.
 * @return the AudioTranscriptionOptions object itself.
 */
AudioTranscriptionOptions setFileFilename(String fileFilename) {
    this.fileFilename = fileFilename;
    return this;
}
}
/**
 * Set the fileFilename property: the filename for file.
 *
 * @param fileFilename the fileFilename value to set.
 * @return the AudioTranscriptionOptions object itself.
 */
AudioTranscriptionOptions setFileFilename(String fileFilename) {
    // Fluent setter: store the name and return this instance for chaining.
    this.fileFilename = fileFilename;
    return this;
}
/**
 * Options for an audio transcription request: the raw audio bytes plus the optional response-format,
 * language, prompt, temperature, model and filename hints.
 */
class AudioTranscriptionOptions {

    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported
     * media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and
     * detail of the result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter
     * ISO-639-1 language code such as 'en' or 'fr'. Providing this known input language is optional
     * but may improve the accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written
     * language of the prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
     * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
     * the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /**
     * Get the file property: the audio data to transcribe. This must be the binary content of a file
     * in one of the supported media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value, or null if no audio data was set.
     */
    public byte[] getFile() {
        // Fix: return a defensive copy instead of the internal buffer, which callers could mutate.
        return this.file == null ? null : this.file.clone();
    }

    /**
     * Get the responseFormat property: the requested format of the transcription response data, which
     * will influence the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: the requested format of the transcription response data, which
     * will influence the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: the primary spoken language of the audio data to be transcribed,
     * supplied as a two-letter ISO-639-1 language code such as 'en' or 'fr'.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: the primary spoken language of the audio data to be transcribed,
     * supplied as a two-letter ISO-639-1 language code such as 'en' or 'fr'. Providing this known
     * input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: an optional hint to guide the model's style or continue from a prior
     * audio segment.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: an optional hint to guide the model's style or continue from a prior
     * audio segment. The written language of the prompt should match the primary spoken language of
     * the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: the sampling temperature, between 0 and 1.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: the sampling temperature, between 0 and 1. Higher values like 0.8
     * will make the output more random, while lower values like 0.2 will make it more focused and
     * deterministic. If set to 0, the model will use log probability to automatically increase the
     * temperature until certain thresholds are hit.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: the model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: the model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /*
     * The optional filename or descriptive identifier to associate with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /**
     * Get the filename property: the optional filename or descriptive identifier to associate with
     * the audio data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: the optional filename or descriptive identifier to associate with
     * the audio data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /*
     * The filename for file. NOTE(review): deliberately mapped to the same "file" JSON property as
     * the audio bytes — presumably consumed for the multipart content-disposition filename; confirm
     * against the multipart serializer before changing.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        // NOTE(review): stored without copying; a caller-side mutation of the array would be visible
        // here — confirm whether a defensive copy is wanted on input as well.
        this.file = file;
    }

    /**
     * Get the fileFilename property: the filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    /**
     * Set the fileFilename property: the filename for file.
     *
     * @param fileFilename the fileFilename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    AudioTranscriptionOptions setFileFilename(String fileFilename) {
        this.fileFilename = fileFilename;
        return this;
    }
}
/**
 * Options for an audio transcription request: the raw audio bytes plus the optional response-format,
 * language, prompt, temperature, model and filename hints.
 */
class AudioTranscriptionOptions {

    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported
     * media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and
     * detail of the result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter
     * ISO-639-1 language code such as 'en' or 'fr'. Providing this known input language is optional
     * but may improve the accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written
     * language of the prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
     * random, while lower values like 0.2 will make it more focused and deterministic. If set to 0,
     * the model will use log probability to automatically increase the temperature until certain
     * thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /**
     * Get the file property: the audio data to transcribe. This must be the binary content of a file
     * in one of the supported media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value, or null if no audio data was set.
     */
    public byte[] getFile() {
        // Null-safe defensive copy (equivalent to CoreUtils.clone): never expose the internal buffer.
        return this.file == null ? null : this.file.clone();
    }

    /**
     * Get the responseFormat property: the requested format of the transcription response data, which
     * will influence the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: the requested format of the transcription response data, which
     * will influence the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: the primary spoken language of the audio data to be transcribed,
     * supplied as a two-letter ISO-639-1 language code such as 'en' or 'fr'.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: the primary spoken language of the audio data to be transcribed,
     * supplied as a two-letter ISO-639-1 language code such as 'en' or 'fr'. Providing this known
     * input language is optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: an optional hint to guide the model's style or continue from a prior
     * audio segment.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: an optional hint to guide the model's style or continue from a prior
     * audio segment. The written language of the prompt should match the primary spoken language of
     * the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: the sampling temperature, between 0 and 1.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: the sampling temperature, between 0 and 1. Higher values like 0.8
     * will make the output more random, while lower values like 0.2 will make it more focused and
     * deterministic. If set to 0, the model will use log probability to automatically increase the
     * temperature until certain thresholds are hit.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: the model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: the model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /*
     * The optional filename or descriptive identifier to associate with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /**
     * Get the filename property: the optional filename or descriptive identifier to associate with
     * the audio data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: the optional filename or descriptive identifier to associate with
     * the audio data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /*
     * The filename for file. NOTE(review): deliberately mapped to the same "file" JSON property as
     * the audio bytes — presumably consumed for the multipart content-disposition filename; confirm
     * against the multipart serializer before changing.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        // NOTE(review): stored without copying; a caller-side mutation of the array would be visible
        // here — confirm whether a defensive copy is wanted on input as well.
        this.file = file;
    }

    /**
     * Get the fileFilename property: the filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    /**
     * Set the fileFilename property: the filename for file.
     *
     * @param fileFilename the fileFilename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    AudioTranscriptionOptions setFileFilename(String fileFilename) {
        this.fileFilename = fileFilename;
        return this;
    }
}
Thanks for pointing that out. Fixed.
/**
 * Get the file property: the audio data to transcribe. This must be the binary content of a file in one of
 * the supported media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
 *
 * @return a defensive copy of the file value, or null if no audio data was set.
 */
public byte[] getFile() {
    // Defensive copy: never hand the internal buffer to callers, or they could mutate request state.
    return this.file == null ? null : this.file.clone();
}
return this.file; // NOTE(review): exposes the internal byte[] without a defensive copy — callers can mutate it
/**
 * Get the file property: the audio data to transcribe. This must be the binary content of a file in one of
 * the supported media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
 *
 * @return the file value.
 */
public byte[] getFile() {
    // Clone before returning so the internal buffer stays private to this instance.
    byte[] audioData = this.file;
    return CoreUtils.clone(audioData);
}
/**
 * Options for an audio transcription request: the binary audio payload plus optional hints
 * (response format, language, prompt, temperature, model) that influence how it is transcribed.
 */
class AudioTranscriptionOptions {

    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported media
     * formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and detail of
     * the result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1
     * language code such as 'en' or 'fr'. Providing this known input language is optional but may improve the
     * accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written language
     * of the prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random,
     * while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use
     * log probability to automatically increase the temperature until certain thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /*
     * The optional filename or descriptive identifier to associate with with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /*
     * The filename for file.
     * NOTE(review): this field reuses the "file" JSON property name already mapped to the audio payload above —
     * presumably it supplies the multipart part's filename; confirm against the serializer before changing.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        // NOTE(review): the incoming array is stored by reference — consider copying here too.
        this.file = file;
    }

    /**
     * Get the file property: The audio data to transcribe. This must be the binary content of a file in one of
     * the supported media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value, or null if none was set.
     */
    public byte[] getFile() {
        // Accessor restored (its javadoc existed with no method body) and made a defensive copy so callers
        // cannot mutate the internal audio buffer.
        return this.file == null ? null : this.file.clone();
    }

    /**
     * Get the responseFormat property: The requested format of the transcription response data, which will
     * influence the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: The requested format of the transcription response data, which will
     * influence the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: The primary spoken language of the audio data to be transcribed, supplied as
     * a two-letter ISO-639-1 language code such as 'en' or 'fr'.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: The primary spoken language of the audio data to be transcribed, supplied as
     * a two-letter ISO-639-1 language code such as 'en' or 'fr'. Providing this known input language is
     * optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: An optional hint to guide the model's style or continue from a prior audio
     * segment. The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: An optional hint to guide the model's style or continue from a prior audio
     * segment. The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: The sampling temperature, between 0 and 1.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: The sampling temperature, between 0 and 1. Higher values like 0.8 will
     * make the output more random, while lower values like 0.2 will make it more focused and deterministic.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: The model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: The model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /**
     * Get the filename property: The optional filename or descriptive identifier to associate with with the
     * audio data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: The optional filename or descriptive identifier to associate with with the
     * audio data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /**
     * Get the fileFilename property: The filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    /**
     * Set the fileFilename property: The filename for file.
     *
     * @param fileFilename the fileFilename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    AudioTranscriptionOptions setFileFilename(String fileFilename) {
        this.fileFilename = fileFilename;
        return this;
    }
}
/**
 * Options for an audio transcription request: the binary audio payload plus optional hints
 * (response format, language, prompt, temperature, model) that influence how it is transcribed.
 */
class AudioTranscriptionOptions {

    /*
     * The audio data to transcribe. This must be the binary content of a file in one of the supported media
     * formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     */
    @JsonProperty(value = "file")
    private byte[] file;

    /*
     * The requested format of the transcription response data, which will influence the content and detail of
     * the result.
     */
    @Generated
    @JsonProperty(value = "response_format")
    private AudioTranscriptionFormat responseFormat;

    /*
     * The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1
     * language code such as 'en' or 'fr'. Providing this known input language is optional but may improve the
     * accuracy and/or latency of transcription.
     */
    @Generated
    @JsonProperty(value = "language")
    private String language;

    /*
     * An optional hint to guide the model's style or continue from a prior audio segment. The written language
     * of the prompt should match the primary spoken language of the audio data.
     */
    @Generated
    @JsonProperty(value = "prompt")
    private String prompt;

    /*
     * The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random,
     * while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use
     * log probability to automatically increase the temperature until certain thresholds are hit.
     */
    @Generated
    @JsonProperty(value = "temperature")
    private Double temperature;

    /*
     * The model to use for this transcription request.
     */
    @Generated
    @JsonProperty(value = "model")
    private String model;

    /*
     * The optional filename or descriptive identifier to associate with with the audio data.
     */
    @Generated
    @JsonProperty(value = "filename")
    private String filename;

    /*
     * The filename for file.
     * NOTE(review): this field reuses the "file" JSON property name already mapped to the audio payload above —
     * presumably it supplies the multipart part's filename; confirm against the serializer before changing.
     */
    @Generated
    @JsonProperty(value = "file")
    private String fileFilename = "file";

    /**
     * Creates an instance of AudioTranscriptionOptions class.
     *
     * @param file the file value to set.
     */
    @JsonCreator
    public AudioTranscriptionOptions(@JsonProperty(value = "file") byte[] file) {
        // NOTE(review): the incoming array is stored by reference — consider copying here too.
        this.file = file;
    }

    /**
     * Get the file property: The audio data to transcribe. This must be the binary content of a file in one of
     * the supported media formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
     *
     * @return a defensive copy of the file value, or null if none was set.
     */
    public byte[] getFile() {
        // Accessor restored (its javadoc existed with no method body) and made a defensive copy so callers
        // cannot mutate the internal audio buffer.
        return this.file == null ? null : this.file.clone();
    }

    /**
     * Get the responseFormat property: The requested format of the transcription response data, which will
     * influence the content and detail of the result.
     *
     * @return the responseFormat value.
     */
    @Generated
    public AudioTranscriptionFormat getResponseFormat() {
        return this.responseFormat;
    }

    /**
     * Set the responseFormat property: The requested format of the transcription response data, which will
     * influence the content and detail of the result.
     *
     * @param responseFormat the responseFormat value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setResponseFormat(AudioTranscriptionFormat responseFormat) {
        this.responseFormat = responseFormat;
        return this;
    }

    /**
     * Get the language property: The primary spoken language of the audio data to be transcribed, supplied as
     * a two-letter ISO-639-1 language code such as 'en' or 'fr'.
     *
     * @return the language value.
     */
    @Generated
    public String getLanguage() {
        return this.language;
    }

    /**
     * Set the language property: The primary spoken language of the audio data to be transcribed, supplied as
     * a two-letter ISO-639-1 language code such as 'en' or 'fr'. Providing this known input language is
     * optional but may improve the accuracy and/or latency of transcription.
     *
     * @param language the language value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setLanguage(String language) {
        this.language = language;
        return this;
    }

    /**
     * Get the prompt property: An optional hint to guide the model's style or continue from a prior audio
     * segment. The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @return the prompt value.
     */
    @Generated
    public String getPrompt() {
        return this.prompt;
    }

    /**
     * Set the prompt property: An optional hint to guide the model's style or continue from a prior audio
     * segment. The written language of the prompt should match the primary spoken language of the audio data.
     *
     * @param prompt the prompt value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setPrompt(String prompt) {
        this.prompt = prompt;
        return this;
    }

    /**
     * Get the temperature property: The sampling temperature, between 0 and 1.
     *
     * @return the temperature value.
     */
    @Generated
    public Double getTemperature() {
        return this.temperature;
    }

    /**
     * Set the temperature property: The sampling temperature, between 0 and 1. Higher values like 0.8 will
     * make the output more random, while lower values like 0.2 will make it more focused and deterministic.
     *
     * @param temperature the temperature value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setTemperature(Double temperature) {
        this.temperature = temperature;
        return this;
    }

    /**
     * Get the model property: The model to use for this transcription request.
     *
     * @return the model value.
     */
    @Generated
    public String getModel() {
        return this.model;
    }

    /**
     * Set the model property: The model to use for this transcription request.
     *
     * @param model the model value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setModel(String model) {
        this.model = model;
        return this;
    }

    /**
     * Get the filename property: The optional filename or descriptive identifier to associate with with the
     * audio data.
     *
     * @return the filename value.
     */
    @Generated
    public String getFilename() {
        return this.filename;
    }

    /**
     * Set the filename property: The optional filename or descriptive identifier to associate with with the
     * audio data.
     *
     * @param filename the filename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    @Generated
    public AudioTranscriptionOptions setFilename(String filename) {
        this.filename = filename;
        return this;
    }

    /**
     * Get the fileFilename property: The filename for file.
     *
     * @return the fileFilename value.
     */
    String getFileFilename() {
        return this.fileFilename;
    }

    /**
     * Set the fileFilename property: The filename for file.
     *
     * @param fileFilename the fileFilename value to set.
     * @return the AudioTranscriptionOptions object itself.
     */
    AudioTranscriptionOptions setFileFilename(String fileFilename) {
        this.fileFilename = fileFilename;
        return this;
    }
}
Is there any validation we want to do on the destination client name?
/**
 * Verifies that renaming a file whose name contains a unicode special character succeeds and that
 * the destination name round-trips the character (previously only non-null was asserted, so a
 * mangled destination name would have passed silently).
 */
public void renameWithUnicodeChars(String specialChar) {
    // Create a source file whose name embeds the unicode character under test.
    ShareFileClient fileClient = shareClient.getFileClient("test-file-source" + specialChar + " pdf.txt");
    fileClient.create(512);
    // Rename to a destination name that also embeds the character.
    ShareFileClient destClient = fileClient.rename("test-file-destination" + specialChar + " pdf.txt");
    assertNotNull(destClient);
    // Compare in URL-encoded space, since the raw character appears percent-encoded in the file URL.
    assertTrue(Utility.urlEncode(destClient.getFileUrl()).contains(Utility.urlEncode(specialChar)));
}
assertNotNull(destClient);
/**
 * Verifies that a file whose name contains a unicode special character can be renamed, and that the
 * destination URL still contains the character after the rename.
 */
public void renameWithUnicodeChars(String specialChar) {
    // Source file whose name embeds the unicode character under test.
    ShareFileClient sourceClient = shareClient.getFileClient("test-file-source" + specialChar + " pdf.txt");
    sourceClient.create(512);

    // Rename to a destination name that also embeds the character.
    ShareFileClient renamedClient = sourceClient.rename("test-file-destination" + specialChar + " pdf.txt");

    assertNotNull(renamedClient);
    // Compare in URL-encoded space, since the raw character appears percent-encoded in the file URL.
    assertTrue(Utility.urlEncode(renamedClient.getFileUrl()).contains(Utility.urlEncode(specialChar)));
}
class FileApiTests extends FileShareTestBase { private ShareFileClient primaryFileClient; private ShareClient shareClient; private String shareName; private String filePath; private static Map<String, String> testMetadata; private static ShareFileHttpHeaders httpHeaders; private FileSmbProperties smbProperties; private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL"; @BeforeEach public void setup() { shareName = generateShareName(); filePath = generatePathName(); shareClient = shareBuilderHelper(shareName).buildClient(); shareClient.create(); primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); testMetadata = Collections.singletonMap("testmetadata", "value"); httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en") .setContentType("application/octet-stream"); smbProperties = new FileSmbProperties() .setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL)); } @Test public void getFileURL() { String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount() .getConnectionString()).getAccountName(); String expectURL = String.format("https: String fileURL = primaryFileClient.getFileUrl(); assertEquals(expectURL, fileURL); } @Test public void getShareSnapshotURL() { String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount() .getConnectionString()).getAccountName(); String expectURL = String.format("https: ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot(); expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot(); ShareFileClient newFileClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot()) .buildClient().getFileClient(filePath); String fileURL = newFileClient.getFileUrl(); 
assertEquals(expectURL, fileURL); String snapshotEndpoint = String.format("https: shareName, filePath, shareSnapshotInfo.getSnapshot()); ShareFileClient client = getFileClient(StorageSharedKeyCredential.fromConnectionString( ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint); assertEquals(client.getFileUrl(), snapshotEndpoint); } @Test public void exists() { primaryFileClient.create(Constants.KB); assertTrue(primaryFileClient.exists()); } @Test public void doesNotExist() { assertFalse(primaryFileClient.exists()); } @Test public void existsError() { primaryFileClient = fileBuilderHelper(shareName, filePath) .sasToken("sig=dummyToken").buildFileClient(); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.exists()); assertEquals(e.getResponse().getStatusCode(), 403); } @Test public void createFile() { FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(1024, null, null, null, null, null, null), 201); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void createFile4TB() { FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(4 * Constants.TB, null, null, null, null, null, null), 201); } @Test public void createFileError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.create(-1)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @Test public void createFileWithArgsFpk() { String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFilePermissionKey(filePermissionKey); Response<ShareFileInfo> resp = primaryFileClient .createWithResponse(1024, httpHeaders, smbProperties, null, testMetadata, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 201); 
assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @Test public void createFileWithArgsFp() { smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileInfo> resp = primaryFileClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION, testMetadata, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 201); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void createChangeTime() { OffsetDateTime changeTime = testResourceNamer.now(); primaryFileClient.createWithResponse(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null, null, null, null, null); 
FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void createFileOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); Response<ShareFileInfo> result = fileClient.createWithResponse(Constants.KB, null, null, null, null, null, null); assertEquals(fileClient.getShareName(), shareName); String[] filePath = fileClient.getFilePath().split("/"); assertEquals(fileName, filePath[1]); assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG)); } @Test public void createFileWithArgsError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.createWithResponse(-1, null, null, null, testMetadata, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @ParameterizedTest @MethodSource("permissionAndKeySupplier") public void createFilePermissionAndKeyError(String filePermissionKey, String permission) { FileSmbProperties smbProperties = new FileSmbProperties().setFilePermissionKey(filePermissionKey); assertThrows(IllegalArgumentException.class, () -> primaryFileClient.createWithResponse(1024, null, smbProperties, permission, null, null, null)); } private static Stream<Arguments> permissionAndKeySupplier() { return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION), Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB)))); } @RequiredServiceVersion(clazz = 
ShareServiceVersion.class, min = "2022-11-02") @ParameterizedTest @ValueSource(booleans = {true, false}) public void createFileTrailingDot(boolean allowTrailingDot) { shareClient = getShareClient(shareName, allowTrailingDot, null); ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient(); String fileName = generatePathName(); String fileNameWithDot = fileName + "."; ShareFileClient fileClient = rootDirectory.getFileClient(fileNameWithDot); fileClient.create(1024); List<String> foundFiles = new ArrayList<>(); for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) { foundFiles.add(fileRef.getName()); } if (allowTrailingDot) { assertEquals(fileNameWithDot, foundFiles.get(0)); } else { assertEquals(fileName, foundFiles.get(0)); } } @Test public void uploadAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), 
stream.toByteArray()); } @Test public void uploadAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient .downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadAndDownloadDataOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || 
downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new 
ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadInputStreamNoLength() { primaryFileClient.create(DATA.getDefaultDataSize()); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.download(os); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test public void parallelUploadInputStreamBadLength() { int[] lengths = new int[]{0, -100, DATA.getDefaultDataSize() - 1, DATA.getDefaultDataSize() + 1}; for (int length : lengths) { primaryFileClient.create(DATA.getDefaultDataSize()); assertThrows(Exception.class, () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), length), null, null)); } } @Test public void uploadSuccessfulRetry() { primaryFileClient.create(DATA.getDefaultDataSize()); ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy()); clientWithFailure.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.download(os); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test 
public void uploadRangeAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void uploadRangeAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), 
DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void downloadAllNull() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse response = primaryFileClient.downloadWithResponse(stream, null, null, null); byte[] body = stream.toByteArray(); ShareFileDownloadHeaders headers = response.getDeserializedHeaders(); assertArrayEquals(DATA.getDefaultBytes(), body); CoreUtils.isNullOrEmpty(headers.getMetadata()); assertNotNull(headers.getContentLength()); assertNotNull(headers.getContentType()); assertNull(headers.getContentMd5()); assertNull(headers.getContentEncoding()); assertNull(headers.getCacheControl()); assertNull(headers.getContentDisposition()); assertNull(headers.getContentLanguage()); } @ParameterizedTest @ValueSource(ints = {0, 1}) public void downloadEmptyFile(int fileSize) { primaryFileClient.create(fileSize); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); primaryFileClient.download(outStream); byte[] result = outStream.toByteArray(); assertEquals(result.length, fileSize); if (fileSize > 0) { assertEquals(0, result[0]); } } /* This is to test the appropriate integration of DownloadResponse, including setting the correct range values on HttpGetterInfo. */ @Test public void downloadWithRetryRange() { /* We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing a retry per the DownloadRetryOptions. The next request should have the same range header, which was generated from the count and offset values in HttpGetterInfo that was constructed on the initial call to download. We don't need to check the data here, but we want to ensure that the correct range is set each time. 
This will test the correction of a bug that was found which caused HttpGetterInfo to have an incorrect offset when it was constructed in FileClient.download(). */ primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileClient fc2 = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new MockRetryRangeResponsePolicy("bytes=2-6")); ShareFileRange range = new ShareFileRange(2, 6L); DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(3); RuntimeException e = assertThrows(RuntimeException.class, () -> fc2.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileDownloadOptions() .setRange(range).setRetryOptions(options), null, null)); /* Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is NOT thrown because the types would not match. */ assertInstanceOf(IOException.class, e.getCause()); } @Test public void downloadRetryDefault() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileClient failureClient = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new MockFailureResponsePolicy(5)); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); failureClient.download(outStream); String bodyStr = outStream.toString(); assertEquals(bodyStr, DATA.getDefaultText()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void downloadTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(DATA.getDefaultDataSizeLong()); shareFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); 
shareFileClient.download(outStream); String downloadedData = outStream.toString(); assertEquals(downloadedData, DATA.getDefaultText()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void downloadOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); fileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileProperties properties = fileClient.getProperties(); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse response = fileClient.downloadWithResponse(stream, null, null, null); byte[] body = stream.toByteArray(); ShareFileDownloadHeaders headers = response.getDeserializedHeaders(); assertArrayEquals(body, DATA.getDefaultBytes()); CoreUtils.isNullOrEmpty(headers.getMetadata()); assertEquals(headers.getContentLength(), properties.getContentLength()); assertEquals(headers.getContentType(), properties.getContentType()); assertEquals(headers.getContentMd5(), properties.getContentMd5()); assertEquals(headers.getContentEncoding(), properties.getContentEncoding()); assertEquals(headers.getCacheControl(), properties.getCacheControl()); assertEquals(headers.getContentDisposition(), properties.getContentDisposition()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void uploadRange4TB() { long fileSize = 4 * Constants.TB; primaryFileClient.create(fileSize); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()) 
.setOffset(fileSize - DATA.getDefaultDataSizeLong()), null, null); /* Upload to end of file. */ ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(fileSize - DATA.getDefaultDataSizeLong(), fileSize), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); } @ParameterizedTest @ValueSource(longs = { 4 * Constants.MB, 5 * Constants.MB}) public void uploadBufferedRangeGreaterThanMaxPutRange(long length) { primaryFileClient.create(length); ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length)); assertDoesNotThrow(() -> primaryFileClient.upload(data, length, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void uploadRangeTrailingDot() { primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(DATA.getDefaultDataSizeLong()); ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(options, null, null); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(), null, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 200); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadRangeOAuth() { ShareServiceClient oAuthServiceClient = 
getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @ParameterizedTest @MethodSource("bufferedUploadVariousPartitions") public void bufferedUploadVariousPartitions(Long length, Long uploadChunkLength) { primaryFileClient.create(length); ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper .getRandomBuffer(Math.toIntExact(length))); assertNotNull(primaryFileClient.upload(data, length, new ParallelTransferOptions() .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength))); } private 
static Stream<Arguments> bufferedUploadVariousPartitions() { return Stream.of( Arguments.of(1024L, null), Arguments.of(1024L, 1024L), Arguments.of(1024L, 256L), Arguments.of(4L * Constants.MB, null), Arguments.of(4L * Constants.MB, 1024L), Arguments.of(20L * Constants.MB, null), Arguments.of(20L * Constants.MB, 4L * Constants.MB) ); } @Test public void bufferedUploadErrorPartitionTooBig() { long length = 20 * Constants.MB; long uploadChunkLength = 20 * Constants.MB; primaryFileClient.create(length); ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length)); assertThrows(Exception.class, () -> primaryFileClient.upload(data, length, new ParallelTransferOptions() .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength))); } @Test public void uploadDataError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void parallelUploadDataError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void uploadRangeDataError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void uploadDataRetryOnTransientFailure() { ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new 
TransientFailureInjectingHttpPipelinePolicy()); primaryFileClient.create(1024); clientWithFailure.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.downloadWithResponse(os, new ShareFileRange(0, DATA.getDefaultDataSizeLong() - 1), null, null, null); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test public void uploadAndClearRange() { String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); primaryFileClient.create(fullInfoString.length()); primaryFileClient.uploadRange(fullInfoData, fullInfoString.length()); primaryFileClient.clearRange(7); ByteArrayOutputStream stream = new ByteArrayOutputStream(); primaryFileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null); for (byte b : stream.toByteArray()) { assertEquals(0, b); } } @Test public void uploadAndClearRangeWithArgs() { String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); primaryFileClient.create(fullInfoString.length()); primaryFileClient.uploadRange(fullInfoData, fullInfoString.length()); primaryFileClient.clearRangeWithResponse(7, 1, null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, 7L), false, null, null); for (byte b : stream.toByteArray()) { assertEquals(0, b); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void clearRangeTrailingDot() { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(DATA.getDefaultDataSizeLong()); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.clearRangeWithResponse( DATA.getDefaultDataSizeLong(), 0, 
null, null), 201); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadAndClearRangeOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); fileClient.create(fullInfoString.length()); fileClient.uploadRange(fullInfoData, fullInfoString.length()); fileClient.clearRange(7); ByteArrayOutputStream stream = new ByteArrayOutputStream(); fileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null); for (byte b : stream.toByteArray()) { assertEquals(0, b); } } @Test public void clearRangeError() { String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); primaryFileClient.create(fullInfoString.length()); primaryFileClient.uploadRange(fullInfoData, fullInfoString.length()); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.clearRange(30)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE); } @Test public void clearRangeErrorArgs() { String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); primaryFileClient.create(fullInfoString.length()); primaryFileClient.uploadRange(fullInfoData, fullInfoString.length()); ShareStorageException e = assertThrows(ShareStorageException.class, () -> 
primaryFileClient.clearRangeWithResponse(7, 20, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE); } @ParameterizedTest @MethodSource("uploadDataLengthMismatchSupplier") public void uploadDataLengthMismatch(int size, String errMsg) { primaryFileClient.create(1024); UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions( DATA.getDefaultInputStream(), size), null, Context.NONE)); assertTrue(e.getMessage().contains(errMsg)); } private static Stream<Arguments> uploadDataLengthMismatchSupplier() { return Stream.of( Arguments.of(6, "more than"), Arguments.of(8, "less than")); } @ParameterizedTest @MethodSource("uploadDataLengthMismatchSupplier") public void parallelUploadDataLengthMismatch(int size, String errMsg) { primaryFileClient.create(1024); UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () -> primaryFileClient.upload(DATA.getDefaultInputStream(), size, null)); assertTrue(e.getMessage().contains(errMsg)); } @ParameterizedTest @MethodSource("uploadDataLengthMismatchSupplier") public void uploadRangeLengthMismatch(int size, String errMsg) { primaryFileClient.create(1024); UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), size)); assertTrue(e.getMessage().contains(errMsg)); } @Test public void downloadDataError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileRange(0, 1023L), false, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void uploadFileDoesNotExist() { File uploadFile = new File(testFolder.getPath() + "/fakefile.txt"); if (uploadFile.exists()) { assert uploadFile.delete(); } UncheckedIOException e 
= assertThrows(UncheckedIOException.class, () -> primaryFileClient.uploadFromFile(uploadFile.getPath())); assertInstanceOf(NoSuchFileException.class, e.getCause()); uploadFile.delete(); } /* * Tests downloading a file using a default client that doesn't have a HttpClient passed to it. */ @LiveOnly @ParameterizedTest @ValueSource(ints = { 0, 20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB }) public void downloadFileBufferCopy(int fileSize) throws IOException { ShareServiceClient shareServiceClient = new ShareServiceClientBuilder() .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()) .buildClient(); ShareFileClient fileClient = shareServiceClient.getShareClient(shareName) .createFile(filePath, fileSize); File file = FileShareTestHelper.getRandomFile(fileSize); fileClient.uploadFromFile(file.toPath().toString()); File outFile = new File(generatePathName() + ".txt"); if (outFile.exists()) { assertTrue(outFile.delete()); } fileClient.downloadToFile(outFile.toPath().toString()); assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize)); shareServiceClient.deleteShare(shareName); outFile.delete(); file.delete(); } @Test public void uploadAndDownloadFileExists() throws IOException { String data = "Download file exists"; File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix)); if (!downloadFile.exists()) { assertTrue(downloadFile.createNewFile()); } primaryFileClient.create(data.length()); primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)), data.length()); UncheckedIOException e = assertThrows(UncheckedIOException.class, () -> primaryFileClient.downloadToFile(downloadFile.getPath())); assertInstanceOf(FileAlreadyExistsException.class, e.getCause()); downloadFile.delete(); } @Test public void uploadAndDownloadToFileDoesNotExist() throws IOException { String data = "Download file DoesNotExist"; File downloadFile = new 
File(String.format("%s/%s.txt", testFolder.getPath(), prefix)); if (downloadFile.exists()) { assertTrue(downloadFile.createNewFile()); } primaryFileClient.create(data.length()); primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)), data.length()); primaryFileClient.downloadToFile(downloadFile.getPath()); Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z"); assertEquals(data, scanner.next()); scanner.close(); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), downloadFile.getName()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void uploadRangePreserveFileLastWrittenOn() { FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE}; for (FileLastWrittenMode mode : modes) { primaryFileClient.create(Constants.KB); ShareFileProperties initialProps = primaryFileClient.getProperties(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions( new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB).setLastWrittenMode(mode), null, null); ShareFileProperties resultProps = primaryFileClient.getProperties(); if (mode.equals(FileLastWrittenMode.PRESERVE)) { assertEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties() .getFileLastWriteTime()); } else { assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties() .getFileLastWriteTime()); } } } @Disabled("the groovy test was not testing this test properly. need to investigate this test further.") @ParameterizedTest @ValueSource(strings = { "", "ü1ü" /* Something that needs to be url encoded. 
*/ }) public void uploadRangeFromURL(String pathSuffix) { primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient(); primaryFileClient.create(1024); String data = "The quick brown fox jumps over the lazy dog"; int sourceOffset = 5; int length = 5; int destinationOffset = 0; primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length()); StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString( ENVIRONMENT.getPrimaryAccount().getConnectionString()); String sasToken = new ShareServiceSasSignatureValues() .setExpiryTime(testResourceNamer.now().plusDays(1)) .setPermissions(new ShareFileSasPermission().setReadPermission(true)) .setShareName(primaryFileClient.getShareName()) .setFilePath(primaryFileClient.getFilePath()) .generateSasQueryParameters(credential) .encode(); ShareFileClient client = fileBuilderHelper(shareName, "destination" + pathSuffix) .endpoint(primaryFileClient.getFileUrl().toString()) .buildFileClient(); client.create(1024); client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileClient.getFileUrl() + "?" 
+ sasToken); ByteArrayOutputStream stream = new ByteArrayOutputStream(); client.download(stream); String result = new String(stream.toByteArray()); for (int i = 0; i < length; i++) { assertEquals(result.charAt(destinationOffset + i), data.charAt(sourceOffset + i)); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadRangeFromURLOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClientSharedKey(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(1024); String data = "The quick brown fox jumps over the lazy dog"; int sourceOffset = 5; int length = 5; int destinationOffset = 0; fileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length()); StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString( ENVIRONMENT.getPrimaryAccount().getConnectionString()); String sasToken = new ShareServiceSasSignatureValues() .setExpiryTime(testResourceNamer.now().plusDays(1)) .setPermissions(new ShareFileSasPermission().setReadPermission(true)) .setShareName(fileClient.getShareName()) .setFilePath(fileClient.getFilePath()) .generateSasQueryParameters(credential) .encode(); String fileNameDest = generatePathName(); ShareFileClient fileClientDest = dirClient.getFileClient(fileNameDest); fileClientDest.create(1024); Response<ShareFileUploadRangeFromUrlInfo> uploadResponse = fileClientDest.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" 
+ sasToken, null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClientDest.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), 1024); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertEquals(stream.toByteArray()[0], 117); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void uploadRangeFromUrlPreserveFileLastWrittenOn() { FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE}; primaryFileClient.create(Constants.KB); ShareFileClient destinationClient = shareClient.getFileClient(generatePathName()); destinationClient.create(Constants.KB); ShareFileProperties initialProps = destinationClient.getProperties(); primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB); StorageSharedKeyCredential credential = StorageSharedKeyCredential .fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()); String sasToken = new ShareServiceSasSignatureValues() .setExpiryTime(testResourceNamer.now().plusDays(1)) .setPermissions(new ShareFileSasPermission().setReadPermission(true)) .setShareName(primaryFileClient.getShareName()) .setFilePath(primaryFileClient.getFilePath()) .generateSasQueryParameters(credential) .encode(); for (FileLastWrittenMode mode 
: modes) { destinationClient.uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(Constants.KB, primaryFileClient.getFileUrl() + "?" + sasToken).setLastWrittenMode(mode), null, null); ShareFileProperties resultProps = destinationClient.getProperties(); if (mode.equals(FileLastWrittenMode.PRESERVE)) { assertTrue(FileShareTestHelper.compareDatesWithPrecision( initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties().getFileLastWriteTime())); } else { assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties() .getFileLastWriteTime()); } } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void uploadRangeFromUrlTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient(); ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + "."); sourceClient.create(Constants.KB); ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + "."); destinationClient.create(Constants.KB); sourceClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB); ShareFileSasPermission permissions = new ShareFileSasPermission() .setReadPermission(true) .setWritePermission(true) .setCreatePermission(true) .setDeletePermission(true); OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1); ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions); String sasToken = shareClient.generateSas(sasValues); Response<ShareFileUploadRangeFromUrlInfo> res = destinationClient.uploadRangeFromUrlWithResponse( new ShareFileUploadRangeFromUrlOptions(Constants.KB, sourceClient.getFileUrl() + "?" 
+ sasToken), null, null);
        FileShareTestHelper.assertResponseStatusCode(res, 201);
    }

    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void uploadRangeFromUrlTrailingDotFail() {
        // NOTE(review): getShareClient(shareName, true, false) presumably toggles
        // trailing-dot support off on one side (the sibling success test passes
        // true, true) — confirm against the helper's signature.
        shareClient = getShareClient(shareName, true, false);
        ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient();
        ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + ".");
        sourceClient.create(DATA.getDefaultDataSizeLong());
        ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + ".");
        destinationClient.create(DATA.getDefaultDataSizeLong());
        sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());

        // Unlike the success case above, no SAS is appended to the source URL; the
        // service call is expected to fail with a ShareStorageException.
        assertThrows(ShareStorageException.class, () -> destinationClient
            .uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(DATA.getDefaultDataSizeLong(),
                sourceClient.getFileUrl()), null, null));
    }

    @Test
    public void openInputStreamWithRange() throws IOException {
        primaryFileClient.create(1024);
        // Range 5-10 is inclusive on both ends -> exactly 6 readable bytes expected below.
        ShareFileRange shareFileRange = new ShareFileRange(5L, 10L);
        byte[] dataBytes = "long test string".getBytes(StandardCharsets.UTF_8);
        ByteArrayInputStream inputStreamData = new ByteArrayInputStream(dataBytes);
        primaryFileClient.upload(inputStreamData, dataBytes.length, null);
        int totalBytesRead = 0;
        StorageFileInputStream stream = primaryFileClient.openInputStream(shareFileRange);
        // Count bytes one at a time until EOF of the ranged stream.
        while (stream.read() != -1) {
            totalBytesRead++;
        }
        stream.close();
        assertEquals(6, totalBytesRead);
    }

    @ParameterizedTest
    @ValueSource(strings = { "", "ü1ü" /* Something that needs to be url encoded.
*/ })
    public void startCopy(String pathSuffix) {
        primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient();
        primaryFileClient.create(1024);
        // Copy the file onto itself; only the presence of a copy id is validated here.
        String sourceURL = primaryFileClient.getFileUrl();

        SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL,
            new ShareFileCopyOptions(), null);
        PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
        assertNotNull(pollResponse.getValue().getCopyId());
    }

    @ParameterizedTest
    // FIX: the @MethodSource value was truncated (unterminated string literal and
    // unclosed annotation), which does not compile. Restored the fully-qualified
    // supplier reference. NOTE(review): confirm the supplier method name against
    // FileShareTestHelper; only the class-name prefix survived in the original.
    @MethodSource("com.azure.storage.file.share.FileShareTestHelper#startCopyArgumentsSupplier")
    public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
        boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
        primaryFileClient.create(1024);
        String sourceURL = primaryFileClient.getFileUrl();
        String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
        smbProperties.setFileCreationTime(testResourceNamer.now())
            .setFileLastWriteTime(testResourceNamer.now());
        if (setFilePermissionKey) {
            smbProperties.setFilePermissionKey(filePermissionKey);
        }

        SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, smbProperties,
            setFilePermission ?
FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute, null, null, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void startCopyTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + "."); sourceClient.create(1024); ShareFileClient destClient = shareClient.getFileClient(generatePathName() + "."); destClient.create(1024); byte[] data = FileShareTestHelper.getRandomBuffer(Constants.KB); ByteArrayInputStream inputStream = new ByteArrayInputStream(data); sourceClient.uploadRange(inputStream, Constants.KB); SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null); poller.waitForCompletion(); assertEquals(poller.poll().getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void startCopyTrailingDotFail() { shareClient = getShareClient(shareName, true, false); ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + "."); sourceClient.create(1024); ShareFileClient destClient = shareClient.getFileClient(generatePathName() + "."); destClient.create(1024); sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); ShareStorageException e = assertThrows(ShareStorageException.class, () -> destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap between" + " the time subscribed and the time we start observing events.") @Test public void startCopyError() { 
// (body of startCopyError, whose signature opens on the previous line)
primaryFileClient.create(1024); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy("some url", testMetadata, null); ShareStorageException e = assertThrows(ShareStorageException.class, poller::waitForCompletion); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.INVALID_HEADER_VALUE); }
// NOTE(review): @MethodSource string below is truncated mid-literal — extraction damage; verify.
// Begins a copy driven by ShareFileCopyOptions (SMB properties, permission, read-only/archive flags).
@ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly, boolean setArchiveAttribute, PermissionCopyModeType permissionType) { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); if (setFilePermissionKey) { smbProperties.setFilePermissionKey(filePermissionKey); } ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(setFilePermission ? FILE_PERMISSION : null) .setIgnoreReadOnly(ignoreReadOnly) .setArchiveAttribute(setArchiveAttribute) .setPermissionCopyModeType(permissionType); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); }
// Copy with ignoreReadOnly and archiveAttribute both set succeeds.
@Test public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); ShareFileCopyOptions options = new ShareFileCopyOptions() .setIgnoreReadOnly(true) .setArchiveAttribute(true); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); }
// Copy with OVERRIDE permission mode propagates the given SMB times and NTFS attributes to the destination.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsFilePermission() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE); smbProperties .setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setNtfsFileAttributes(ntfs); ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(FILE_PERMISSION) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(), smbProperties.getFileCreationTime()); FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(), smbProperties.getFileLastWriteTime()); assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes()); }
// Copy propagates an explicit fileChangeTime to the destination's SMB properties.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsChangeTime() { ShareFileInfo client = primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); smbProperties.setFileChangeTime(testResourceNamer.now()); ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(FILE_PERMISSION) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(), primaryFileClient.getProperties().getSmbProperties().getFileChangeTime()); }
// Copy using a pre-created file-permission key carries SMB properties to the destination.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE); smbProperties .setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setNtfsFileAttributes(ntfs) .setFilePermissionKey(filePermissionKey); ShareFileCopyOptions options = new
// (continuation of startCopyWithOptionsCopySmbFilePropertiesPermissionKey from the previous line)
ShareFileCopyOptions() .setSmbProperties(smbProperties) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(), smbProperties.getFileCreationTime()); FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(), smbProperties.getFileLastWriteTime()); assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes()); }
// Copy onto a destination holding a valid lease succeeds when the lease id is supplied.
@Test public void startCopyWithOptionLease() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId); ShareFileCopyOptions options = new ShareFileCopyOptions() .setDestinationRequestConditions(conditions); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); }
// Copy with a made-up (never acquired) lease id must throw.
@Test public void startCopyWithOptionsInvalidLease() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String leaseId = testResourceNamer.randomUuid(); ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId); ShareFileCopyOptions options = new ShareFileCopyOptions() .setDestinationRequestConditions(conditions); assertThrows(ShareStorageException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null)); }
// Copy carrying destination metadata succeeds.
@Test public void startCopyWithOptionsMetadata() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); ShareFileCopyOptions options = new ShareFileCopyOptions() .setMetadata(testMetadata); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); }
// Copy configured to preserve the source's SMB properties keeps the original times/attributes.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsWithOriginalSmbProperties() { primaryFileClient.create(1024); ShareFileProperties initialProperties = primaryFileClient.getProperties(); OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime(); OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime(); OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime(); EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes(); String sourceURL = primaryFileClient.getFileUrl(); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId); CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList() .setCreatedOn(true) .setLastWrittenOn(true) .setChangedOn(true) .setFileAttributes(true); ShareFileCopyOptions options = new ShareFileCopyOptions() .setDestinationRequestConditions(conditions) .setSmbPropertiesToCopy(list); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); ShareFileProperties resultProperties = primaryFileClient.getProperties(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getSmbProperties() .getFileCreationTime()); FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getSmbProperties() .getFileLastWriteTime()); FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getSmbProperties() .getFileChangeTime()); assertEquals(fileAttributes, resultProperties.getSmbProperties().getNtfsFileAttributes()); }
// NOTE(review): @MethodSource string below is truncated mid-literal — extraction damage; verify.
// Supplying both explicit SMB properties and "copy from source" flags is rejected client-side.
@ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn, boolean fileAttributes) { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE); CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList() .setCreatedOn(createdOn) .setLastWrittenOn(lastWrittenOn) .setChangedOn(changedOn) .setFileAttributes(fileAttributes); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFileChangeTime(testResourceNamer.now()) .setNtfsFileAttributes(ntfs); ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(FILE_PERMISSION) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE) .setSmbPropertiesToCopy(list); assertThrows(IllegalArgumentException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null)); }
// Copy via an OAuth (bearer-token) share service client with BACKUP intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void startCopyOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
// (continuation of startCopyOAuth from the previous line)
.getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient sourceClient = dirClient.getFileClient(generatePathName()); sourceClient.create(DATA.getDefaultDataSizeLong()); ShareFileClient destClient = dirClient.getFileClient(generatePathName()); destClient.create(DATA.getDefaultDataSizeLong()); sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); String sourceURL = sourceClient.getFileUrl(); SyncPoller<ShareFileCopyInfo, Void> poller = sourceClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); }
// Aborting an already-finished copy throws; the test only asserts that a copy id was produced first.
@Test public void abortCopy() { int fileSize = Constants.MB; byte[] bytes = new byte[fileSize]; ByteArrayInputStream data = new ByteArrayInputStream(bytes); ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); primaryFileClient.create(fileSize); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient(); dest.create(fileSize); SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId())); }
// Same as abortCopy but with destination lease conditions supplied to abortCopyWithResponse.
@Test public void abortCopyLease() { int fileSize = Constants.MB; byte[] bytes = new byte[fileSize]; ByteArrayInputStream data = new ByteArrayInputStream(bytes); ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); primaryFileClient.create(fileSize); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient(); dest.create(fileSize); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId); SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy( sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(), requestConditions, null, null)); }
// Begin/abort with an invalid (never acquired) lease id must throw.
@Test public void abortCopyInvalidLease() { int fileSize = Constants.MB; byte[] bytes = new byte[fileSize]; ByteArrayInputStream data = new ByteArrayInputStream(bytes); ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); primaryFileClient.create(fileSize); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient(); dest.create(fileSize); String leaseId = testResourceNamer.randomUuid(); ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId); assertThrows(ShareStorageException.class, () -> { SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy( sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(), requestConditions, null, null); }); }
// abortCopy variant exercising trailing-dot file names.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void abortCopyTrailingDot() { ByteArrayInputStream data = new ByteArrayInputStream(new byte[Constants.MB]); String fileName = generatePathName() + "."; ShareFileClient primaryFileClient = getFileClient(shareName, fileName, true, null); primaryFileClient.create(Constants.MB); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, fileName).buildFileClient(); dest.create(Constants.MB); SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId())); }
// abortCopy variant through an OAuth client with BACKUP intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void abortCopyOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient sourceClient = dirClient.getFileClient(fileName); sourceClient.create(DATA.getDefaultDataSizeLong()); sourceClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); String sourceURL = sourceClient.getFileUrl(); ShareFileClient destClient = dirClient.getFileClient(generatePathName()); destClient.create(DATA.getDefaultDataSizeLong()); SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> destClient.abortCopy(pollResponse.getValue().getCopyId())); }
// Aborting with a random copy id must throw.
@Test public void abortCopyError() {
// (body of abortCopyError, whose signature opens on the previous line)
assertThrows(ShareStorageException.class, () -> primaryFileClient.abortCopy("randomId")); }
// Deleting an existing file returns 202.
@Test public void deleteFile() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteWithResponse(null, null), 202); }
// Delete works for trailing-dot names when trailing-dot support is enabled.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void deleteFileTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); FileShareTestHelper.assertResponseStatusCode(shareFileClient.deleteWithResponse(null, null), 202); }
// Delete through an OAuth client with BACKUP intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void deleteFileOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(Constants.KB); FileShareTestHelper.assertResponseStatusCode(fileClient.deleteWithResponse(null, null), 202); }
// Deleting a file that was never created yields 404 RESOURCE_NOT_FOUND.
@Test public void deleteFileError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.deleteWithResponse(null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); }
// deleteIfExists on an existing file returns 202.
@Test public void deleteIfExistsFile() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteIfExistsWithResponse(null, null, null), 202); }
// Minimal deleteIfExists smoke test.
@Test public void deleteIfExistsFileMin() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); primaryFileClient.deleteIfExists(); }
// deleteIfExists on a missing file returns false / 404 without throwing.
@Test public void deleteIfExistsFileThatDoesNotExist() { ShareFileClient client = shareClient.getFileClient(generateShareName()); Response<Boolean> response = client.deleteIfExistsWithResponse(null, null, null); assertFalse(response.getValue()); FileShareTestHelper.assertResponseStatusCode(response, 404); assertFalse(client.exists()); }
// Second deleteIfExists on the same file returns false.
@Test public void deleteIfExistsFileThatWasAlreadyDeleted() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); assertTrue(primaryFileClient.deleteIfExists()); assertFalse(primaryFileClient.deleteIfExists()); }
// getProperties populates ETag, last-modified and every SMB property field.
@Test public void getProperties() { primaryFileClient.create(1024); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileProperties> resp = primaryFileClient.getPropertiesWithResponse(null, null); FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); }
// getProperties variant for trailing-dot file names.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void getPropertiesTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileProperties> resp = shareFileClient.getPropertiesWithResponse(null, null); FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); }
// getProperties through an OAuth client must match the values returned at create time.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void getPropertiesOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); ShareFileInfo createInfo = fileClient.create(Constants.KB); ShareFileProperties properties = fileClient.getProperties(); assertEquals(createInfo.getETag(), properties.getETag()); assertEquals(createInfo.getLastModified(), properties.getLastModified()); assertEquals(createInfo.getSmbProperties().getFilePermissionKey(), properties.getSmbProperties().getFilePermissionKey()); assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(), properties.getSmbProperties().getNtfsFileAttributes()); assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(), properties.getSmbProperties().getFileLastWriteTime()); assertEquals(createInfo.getSmbProperties().getFileCreationTime(), properties.getSmbProperties().getFileCreationTime()); assertEquals(createInfo.getSmbProperties().getFileChangeTime(),
// (continuation of getPropertiesOAuth from the previous line)
properties.getSmbProperties().getFileChangeTime()); assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId()); assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId()); }
// getProperties on a nonexistent file raises ResourceNotFound.
@Test public void getPropertiesError() { ShareStorageException ex = assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties()); assertTrue(ex.getMessage().contains("ResourceNotFound")); }
// setProperties with a pre-created file-permission key populates all SMB property fields.
@Test public void setHttpHeadersFpk() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFilePermissionKey(filePermissionKey); Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); }
// setProperties with an inline file permission (instead of a key) populates the same fields.
@Test public void setHttpHeadersFp() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, FILE_PERMISSION, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); }
// setProperties persists an explicit fileChangeTime.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void setHttpHeadersChangeTime() { primaryFileClient.create(512); OffsetDateTime changeTime = testResourceNamer.now(); primaryFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null); FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); }
// setProperties variant for trailing-dot file names.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void setHttpHeadersTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); OffsetDateTime changeTime = testResourceNamer.now(); shareFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null); FileShareTestHelper.compareDatesWithPrecision(shareFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); }
// setProperties through an OAuth client: HTTP headers round-trip via getProperties.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void setHttpHeadersOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); httpHeaders = new ShareFileHttpHeaders() .setContentType("application/octet-stream") .setContentDisposition("attachment") .setCacheControl("no-transform") .setContentEncoding("gzip") .setContentLanguage("en"); Response<ShareFileInfo> res = fileClient.setPropertiesWithResponse(Constants.KB, httpHeaders, null, null, null, null); ShareFileProperties properties = fileClient.getProperties(); FileShareTestHelper.assertResponseStatusCode(res, 200); assertNotNull(res.getValue().getETag()); assertEquals(res.getValue().getETag(), res.getHeaders().getValue(HttpHeaderName.ETAG)); assertEquals(properties.getContentType(), "application/octet-stream"); assertEquals(properties.getContentDisposition(), "attachment"); assertEquals(properties.getCacheControl(), "no-transform"); assertEquals(properties.getContentEncoding(), "gzip"); assertNull(properties.getContentMd5()); }
// Negative file size is rejected with 400 OUT_OF_RANGE_INPUT.
@Test public void setHttpHeadersError() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.setPropertiesWithResponse(-1, null, null, null, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); }
// setMetadata replaces the metadata set at create time.
@Test public void setMetadata() { primaryFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileProperties getPropertiesBefore = primaryFileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = primaryFileClient .setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = primaryFileClient.getProperties(); assertEquals(testMetadata,
// (continuation of setMetadata from the previous line)
getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); }
// setMetadata variant for trailing-dot file names.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void setMetadataTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileProperties getPropertiesBefore = shareFileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = shareFileClient.setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = shareFileClient.getProperties(); assertEquals(testMetadata, getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); }
// setMetadata through an OAuth client with BACKUP intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void setMetadataOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.createWithResponse(Constants.KB, null, null, null, testMetadata, null, null); ShareFileProperties getPropertiesBefore = fileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = fileClient.setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = fileClient.getProperties(); assertEquals(testMetadata, getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); }
// An empty metadata key is rejected with 400 EMPTY_METADATA_KEY.
@Test public void setMetadataError() { primaryFileClient.create(1024); Map<String, String> errorMetadata = Collections.singletonMap("", "value"); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.setMetadataWithResponse(errorMetadata, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY); }
// listRanges reports a single range [0, 1023] after a full 1024-byte upload.
@Test public void listRanges() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); }
// listRanges variant for trailing-dot file names.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listRangesTrailingDot() throws IOException { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(1024); String fileName = generatePathName() + "."; String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); }
// listRanges restricted to [0, 511] returns only that subrange.
@Test public void listRangesWithRange() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); }
// listRanges against a share snapshot sees the ranges captured at snapshot time.
@Test public void listRangesSnapshot() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient = fileBuilderHelper(shareName, filePath) .snapshot(snapInfo.getSnapshot()) .buildFileClient(); primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); }
// listRanges against a bogus (nonexistent) snapshot id must throw.
@Test public void listRangesSnapshotFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient = fileBuilderHelper(shareName, filePath) .snapshot("2020-08-07T16:58:02.0000000Z") .buildFileClient(); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); })); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); }
// listRanges through an OAuth client — method continues past the end of this chunk.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listRangesOAuth() throws IOException { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient
fileClient = dirClient.getFileClient(fileName); fileClient.create(Constants.KB); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); fileClient.uploadFromFile(uploadFile); fileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear, List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) { primaryFileClient.create(4 * Constants.MB); primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(4 * Constants.MB)), 4 * Constants.MB); String snapshotId = primaryFileServiceClient.getShareClient(primaryFileClient.getShareName()) .createSnapshot() .getSnapshot(); rangesToUpdate.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(new ByteArrayInputStream( FileShareTestHelper.getRandomBuffer((int) size)), size).setOffset(it.getStart()), null, null); }); rangesToClear.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; primaryFileClient.clearRangeWithResponse(size, it.getStart(), null, null); }); ShareFileRangeList rangeDiff = primaryFileClient.listRangesDiff(snapshotId); assertEquals(expectedRanges.size(), rangeDiff.getRanges().size()); assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size()); for (int i = 0; i < expectedRanges.size(); i++) { FileRange actualRange = rangeDiff.getRanges().get(i); FileRange expectedRange = expectedRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } for (int i = 0; i < 
expectedClearRanges.size(); i++) { ClearRange actualRange = rangeDiff.getClearRanges().get(i); ClearRange expectedRange = expectedClearRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listRangesDiffOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); fileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB); String snapshotId = primaryFileServiceClient.getShareClient(fileClient.getShareName()) .createSnapshot() .getSnapshot(); List<FileRange> rangesToUpdate = FileShareTestHelper.createFileRanges(); List<FileRange> rangesToClear = FileShareTestHelper.createFileRanges(); List<FileRange> expectedRanges = FileShareTestHelper.createFileRanges(); List<FileRange> expectedClearRanges = FileShareTestHelper.createFileRanges(); rangesToUpdate.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; fileClient.uploadWithResponse(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) size)), size, it.getStart(), null, null); }); rangesToClear.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; fileClient.clearRangeWithResponse(size, it.getStart(), null, null); }); ShareFileRangeList rangeDiff = fileClient.listRangesDiff(snapshotId); assertEquals(expectedRanges.size(), rangeDiff.getRanges().size()); assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size()); for (int i = 0; i < expectedRanges.size(); i++) { FileRange actualRange = rangeDiff.getRanges().get(i); 
FileRange expectedRange = expectedRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } for (int i = 0; i < expectedClearRanges.size(); i++) { ClearRange actualRange = rangeDiff.getClearRanges().get(i); FileRange expectedRange = expectedClearRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void listRangesDiffWithRange() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025, 1026L)), null, null).getValue().getRanges().get(0); assertEquals(1025, range.getStart()); assertEquals(1026, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void listRangesDiffLease() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), 
DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()) .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)), null, null) .getValue().getRanges().get(0); assertEquals(1024, range.getStart()); assertEquals(1030, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listRangesDiffTrailingDot() throws IOException { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); String fileNameWithDot = generateShareName() + "."; primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileNameWithDot); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1024L); primaryFileClient.uploadRangeWithResponse(options, null, null); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025L, 1026L)), null, null).getValue().getRanges().get(0); assertEquals(1025, range.getStart()); assertEquals(1026, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileNameWithDot); } @Test public void listRangesDiffLeaseFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo 
snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadWithResponse(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), 1024L, null, null); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()) .setRequestConditions(new ShareRequestConditions() .setLeaseId(testResourceNamer.randomUuid())), null, null).getValue().getRanges().get(0)); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesDiffFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions("2020-08-07T16:58:02.0000000Z"), null, null).getValue().getRanges() .get(0)); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listHandles() { primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles().stream().count()); } @Test public void listHandlesWithMaxResult() { primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles(2, null, null).stream().count()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listHandlesTrailingDot() { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles().stream().count()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listHandlesOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = 
oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); assertEquals(0, fileClient.listHandles().stream().count()); } @PlaybackOnly @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03") @Test public void listHandlesAccessRights() { ShareClient shareClient = primaryFileServiceClient.getShareClient("myshare"); ShareDirectoryClient directoryClient = shareClient.getDirectoryClient("mydirectory"); ShareFileClient fileClient = directoryClient.getFileClient("myfile"); List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList()); assertEquals(list.get(0).getAccessRights().get(0), ShareFileHandleAccessRights.WRITE); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03") @Test public void forceCloseHandleMin() { primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @Test public void forceCloseHandleInvalidHandleID() { primaryFileClient.create(512); assertThrows(ShareStorageException.class, () -> primaryFileClient.forceCloseHandle("invalidHandleId")); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void forceCloseHandleTrailingDot() { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void forceCloseHandleOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new 
ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(512); CloseHandlesInfo handlesClosedInfo = fileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07") @Test public void forceCloseAllHandlesMin() { primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseAllHandles(null, null); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameMin() { primaryFileClient.create(512); assertNotNull(primaryFileClient.rename(generatePathName())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(strings = {"\u200B", "\u200C", "\u200D", "\uFEFF"}) @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameWithResponse() { primaryFileClient.create(512); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()), null, null); ShareFileClient renamedClient = resp.getValue(); assertNotNull(renamedClient.getProperties()); assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12") @Test public void renameSasToken() { ShareFileSasPermission permissions = new ShareFileSasPermission() .setReadPermission(true) .setWritePermission(true) .setCreatePermission(true) .setDeletePermission(true); OffsetDateTime expiryTime = 
testResourceNamer.now().plusDays(1); ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions); String sas = shareClient.generateSas(sasValues); ShareFileClient client = getFileClient(sas, primaryFileClient.getFileUrl()); primaryFileClient.create(1024); String fileName = generatePathName(); ShareFileClient destClient = client.rename(fileName); assertNotNull(destClient.getProperties()); assertEquals(fileName, destClient.getFilePath()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDifferentDirectory() { primaryFileClient.create(512); ShareDirectoryClient dc = shareClient.getDirectoryClient(generatePathName()); dc.create(); ShareFileClient destinationPath = dc.getFileClient(generatePathName()); ShareFileClient resultClient = primaryFileClient.rename(destinationPath.getFilePath()); assertTrue(destinationPath.exists()); assertEquals(destinationPath.getFilePath(), resultClient.getFilePath()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(booleans = {true, false}) public void renameReplaceIfExists(boolean replaceIfExists) { primaryFileClient.create(512); ShareFileClient destination = shareClient.getFileClient(generatePathName()); destination.create(512); boolean exception = false; try { primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath()) .setReplaceIfExists(replaceIfExists), null, null); } catch (ShareStorageException ignored) { exception = true; } assertEquals(replaceIfExists, !exception); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(booleans = {true, false}) public void renameIgnoreReadOnly(boolean ignoreReadOnly) { primaryFileClient.create(512); FileSmbProperties props = new FileSmbProperties() .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)); ShareFileClient destinationFile = 
shareClient.getFileClient(generatePathName()); destinationFile.createWithResponse(512L, null, props, null, null, null, null, null); boolean exception = false; try { primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destinationFile.getFilePath()) .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true), null, null); } catch (ShareStorageException ignored) { exception = true; } assertEquals(exception, !ignoreReadOnly); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameFilePermission() { primaryFileClient.create(512); String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setFilePermission(filePermission), null, null).getValue(); assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameFilePermissionAndKeySet() { primaryFileClient.create(512); String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()) .setFilePermission(filePermission) .setSmbProperties(new FileSmbProperties().setFilePermissionKey("filePermissionkey")), null, null) .getValue()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void renameFileSmbProperties() { primaryFileClient.create(512); String filePermission = 
"O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; String permissionKey = shareClient.createPermission(filePermission); OffsetDateTime fileCreationTime = testResourceNamer.now().minusDays(5); OffsetDateTime fileLastWriteTime = testResourceNamer.now().minusYears(2); OffsetDateTime fileChangeTime = testResourceNamer.now(); FileSmbProperties smbProperties = new FileSmbProperties() .setFilePermissionKey(permissionKey) .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY)) .setFileCreationTime(fileCreationTime) .setFileLastWriteTime(fileLastWriteTime) .setFileChangeTime(fileChangeTime); ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setSmbProperties(smbProperties), null, null).getValue(); ShareFileProperties destProperties = destClient.getProperties(); assertEquals(destProperties.getSmbProperties().getNtfsFileAttributes(), EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY)); assertNotNull(destProperties.getSmbProperties().getFileCreationTime()); assertNotNull(destProperties.getSmbProperties().getFileLastWriteTime()); FileShareTestHelper.compareDatesWithPrecision(destProperties.getSmbProperties().getFileChangeTime(), fileChangeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameMetadata() { primaryFileClient.create(512); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setMetadata(updatedMetadata), null, null); ShareFileClient renamedClient = resp.getValue(); ShareFileProperties getPropertiesAfter = renamedClient.getProperties(); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } 
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void renameTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient(); ShareFileClient primaryFileClient = rootDirectory.getFileClient(generatePathName() + "."); primaryFileClient.create(1024); Response<ShareFileClient> response = primaryFileClient .renameWithResponse(new ShareFileRenameOptions(generatePathName() + "."), null, null); FileShareTestHelper.assertResponseStatusCode(response, 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameError() { primaryFileClient = shareClient.getFileClient(generatePathName()); assertThrows(ShareStorageException.class, () -> primaryFileClient.rename(generatePathName())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameSourceAC() { primaryFileClient.create(512); String leaseID = setupFileLeaseCondition(primaryFileClient, RECEIVED_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(leaseID); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setSourceRequestConditions(src), null, null), 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameSourceACFail() { primaryFileClient.create(512); setupFileLeaseCondition(primaryFileClient, GARBAGE_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(GARBAGE_LEASE_ID); assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setSourceRequestConditions(src), null, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDestAC() { primaryFileClient.create(512); String pathName = 
generatePathName(); ShareFileClient destFile = shareClient.getFileClient(pathName); destFile.create(512); String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(leaseID); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse( new ShareFileRenameOptions(pathName).setDestinationRequestConditions(src).setReplaceIfExists(true), null, null), 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDestACFail() { primaryFileClient.create(512); String pathName = generatePathName(); ShareFileClient destFile = shareClient.getFileClient(pathName); destFile.create(512); setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(GARBAGE_LEASE_ID); assertThrows(RuntimeException.class, () -> destFile.renameWithResponse(new ShareFileRenameOptions(pathName) .setDestinationRequestConditions(src).setReplaceIfExists(true), null, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void renameContentType() { primaryFileClient.create(512); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setContentType("mytype"), null, null); ShareFileClient renamedClient = resp.getValue(); ShareFileProperties props = renamedClient.getProperties(); assertEquals(props.getContentType(), "mytype"); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); ShareFileClient fileClient = 
dirClient.getFileClient(generatePathName()); fileClient.create(512); String fileRename = generatePathName(); Response<ShareFileClient> resp = fileClient.renameWithResponse(new ShareFileRenameOptions(fileRename), null, null); ShareFileClient renamedClient = resp.getValue(); renamedClient.getProperties(); assertEquals(fileRename, renamedClient.getFilePath()); assertThrows(ShareStorageException.class, fileClient::getProperties); } @Test public void getSnapshotId() { String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString(); ShareFileClient shareSnapshotClient = fileBuilderHelper(shareName, filePath).snapshot(snapshot) .buildFileClient(); assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId()); } @Test public void getShareName() { assertEquals(shareName, primaryFileClient.getShareName()); } @Test public void getFilePath() { assertEquals(filePath, primaryFileClient.getFilePath()); } public void perCallPolicy() { primaryFileClient.create(512); ShareFileClient fileClient = fileBuilderHelper(primaryFileClient.getShareName(), primaryFileClient.getFilePath()).addPolicy(getPerCallVersionPolicy()).buildFileClient(); Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse(null, null); assertEquals(response.getHeaders().getValue(X_MS_VERSION), "2017-11-09"); } @Test public void defaultAudience() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(null) /* should default to "https: ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } @Test public void storageAccountAudience() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, 
fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(ShareAudience.createShareServiceAccountAudience(shareClient.getAccountName()))); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } @Test public void audienceError() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(ShareAudience.createShareServiceAccountAudience("badAudience"))); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); ShareStorageException e = assertThrows(ShareStorageException.class, aadFileClient::exists); assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode()); } @Test public void audienceFromString() { String url = String.format("https: ShareAudience audience = ShareAudience.fromString(url); String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(audience)); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } /* Uncomment this test when Client Name is enabled with STG 93. 
@PlaybackOnly @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-02-04") @Test public void listHandlesClientName() { ShareClient client = primaryFileServiceClient.getShareClient("testing"); ShareDirectoryClient directoryClient = client.getDirectoryClient("dir1"); ShareFileClient fileClient = directoryClient.getFileClient("test.txt"); List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList()); assertNotNull(list.get(0).getClientName()); } */ }
// FileApiTests: live/playback integration tests for ShareFileClient (Azure File Share SDK).
// Fixture state: per-test share + file path created in setup(); shared static metadata/headers; a canonical
// SDDL string used as the test file permission. The @BeforeEach creates a fresh share per test.
// NOTE(review): every String.format("https: ...") literal in this class is truncated — the URL tail after
// "https:" was lost (apparent "//"-stripping artifact). The expected-URL assertions (getFileURL,
// getShareSnapshotURL) cannot compile/pass as-is; restore the full format strings from the upstream source.
// This line: class header, fields, setup(), getFileURL(), and the start of getShareSnapshotURL().
class FileApiTests extends FileShareTestBase { private ShareFileClient primaryFileClient; private ShareClient shareClient; private String shareName; private String filePath; private static Map<String, String> testMetadata; private static ShareFileHttpHeaders httpHeaders; private FileSmbProperties smbProperties; private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL"; @BeforeEach public void setup() { shareName = generateShareName(); filePath = generatePathName(); shareClient = shareBuilderHelper(shareName).buildClient(); shareClient.create(); primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); testMetadata = Collections.singletonMap("testmetadata", "value"); httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en") .setContentType("application/octet-stream"); smbProperties = new FileSmbProperties() .setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL)); } @Test public void getFileURL() { String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount() .getConnectionString()).getAccountName(); String expectURL = String.format("https: String fileURL = primaryFileClient.getFileUrl(); assertEquals(expectURL, fileURL); } @Test public void getShareSnapshotURL() { String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount() .getConnectionString()).getAccountName(); String expectURL = String.format("https: ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot(); expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot(); ShareFileClient newFileClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot()) .buildClient().getFileClient(filePath); String fileURL = newFileClient.getFileUrl(); 
// End of getShareSnapshotURL (snapshot-qualified URL round-trip), then exists/doesNotExist/existsError
// (existsError: a dummy SAS token must yield HTTP 403), createFile, createFile4TB (2020-02-10+ allows 4 TB),
// createFileError (negative size -> 400 OUT_OF_RANGE_INPUT), and the start of createFileWithArgsFpk
// (create with a server-registered file-permission key + SMB properties + metadata).
assertEquals(expectURL, fileURL); String snapshotEndpoint = String.format("https: shareName, filePath, shareSnapshotInfo.getSnapshot()); ShareFileClient client = getFileClient(StorageSharedKeyCredential.fromConnectionString( ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint); assertEquals(client.getFileUrl(), snapshotEndpoint); } @Test public void exists() { primaryFileClient.create(Constants.KB); assertTrue(primaryFileClient.exists()); } @Test public void doesNotExist() { assertFalse(primaryFileClient.exists()); } @Test public void existsError() { primaryFileClient = fileBuilderHelper(shareName, filePath) .sasToken("sig=dummyToken").buildFileClient(); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.exists()); assertEquals(e.getResponse().getStatusCode(), 403); } @Test public void createFile() { FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(1024, null, null, null, null, null, null), 201); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void createFile4TB() { FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(4 * Constants.TB, null, null, null, null, null, null), 201); } @Test public void createFileError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.create(-1)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @Test public void createFileWithArgsFpk() { String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFilePermissionKey(filePermissionKey); Response<ShareFileInfo> resp = primaryFileClient .createWithResponse(1024, httpHeaders, smbProperties, null, testMetadata, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 201); 
// Remaining non-null assertions of createFileWithArgsFpk (server must echo SMB properties back), then
// createFileWithArgsFp (raw SDDL permission string instead of a registered key — same assertion set),
// and the start of createChangeTime (explicit file change time on create, 2021-06-08+).
assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @Test public void createFileWithArgsFp() { smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileInfo> resp = primaryFileClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION, testMetadata, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 201); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void createChangeTime() { OffsetDateTime changeTime = testResourceNamer.now(); primaryFileClient.createWithResponse(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null, null, null, null, null); 
// End of createChangeTime (round-trip compare within recording precision), createFileOAuth (bearer-token
// client with BACKUP intent creates a file inside a directory and checks path/ETag plumbing),
// createFileWithArgsError (negative size with metadata -> 400), createFilePermissionAndKeyError
// (supplying both a permission key and a permission, or an oversized permission, must throw
// IllegalArgumentException client-side), and its @MethodSource supplier; ends mid-annotation.
FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void createFileOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); Response<ShareFileInfo> result = fileClient.createWithResponse(Constants.KB, null, null, null, null, null, null); assertEquals(fileClient.getShareName(), shareName); String[] filePath = fileClient.getFilePath().split("/"); assertEquals(fileName, filePath[1]); assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG)); } @Test public void createFileWithArgsError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.createWithResponse(-1, null, null, null, testMetadata, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @ParameterizedTest @MethodSource("permissionAndKeySupplier") public void createFilePermissionAndKeyError(String filePermissionKey, String permission) { FileSmbProperties smbProperties = new FileSmbProperties().setFilePermissionKey(filePermissionKey); assertThrows(IllegalArgumentException.class, () -> primaryFileClient.createWithResponse(1024, null, smbProperties, permission, null, null, null)); } private static Stream<Arguments> permissionAndKeySupplier() { return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION), Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB)))); } @RequiredServiceVersion(clazz = 
// createFileTrailingDot (2022-11-02+): with allowTrailingDot=true the service keeps "name.", otherwise it
// trims the dot — verified via listFilesAndDirectories. Then uploadAndDownloadData: upload a range and
// download it back, checking status codes, content length, and that every SMB/file header is populated.
ShareServiceVersion.class, min = "2022-11-02") @ParameterizedTest @ValueSource(booleans = {true, false}) public void createFileTrailingDot(boolean allowTrailingDot) { shareClient = getShareClient(shareName, allowTrailingDot, null); ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient(); String fileName = generatePathName(); String fileNameWithDot = fileName + "."; ShareFileClient fileClient = rootDirectory.getFileClient(fileNameWithDot); fileClient.create(1024); List<String> foundFiles = new ArrayList<>(); for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) { foundFiles.add(fileRef.getName()); } if (allowTrailingDot) { assertEquals(fileNameWithDot, foundFiles.get(0)); } else { assertEquals(fileName, foundFiles.get(0)); } } @Test public void uploadAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), 
// End of uploadAndDownloadData, then uploadAndDownloadDataWithArgs (offset 1 + explicit range -> expects
// 206 partial content), and the start of uploadAndDownloadDataOAuth (same round-trip through a
// bearer-token client with BACKUP intent).
stream.toByteArray()); } @Test public void uploadAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient .downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadAndDownloadDataOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || 
// End of the OAuth round-trip assertions, parallelUploadAndDownloadData (parallel-upload API, same header
// assertions as the single-range test), and the start of parallelUploadAndDownloadDataWithArgs.
downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new 
// parallelUploadAndDownloadDataWithArgs (offset/range variant), parallelUploadInputStreamNoLength (upload
// without an explicit length), parallelUploadInputStreamBadLength (0 / negative / off-by-one lengths must
// throw), and uploadSuccessfulRetry (transient-failure-injecting pipeline policy: the retried upload must
// still produce the correct bytes). Ends with a dangling @Test belonging to the method on the next line.
ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadInputStreamNoLength() { primaryFileClient.create(DATA.getDefaultDataSize()); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.download(os); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test public void parallelUploadInputStreamBadLength() { int[] lengths = new int[]{0, -100, DATA.getDefaultDataSize() - 1, DATA.getDefaultDataSize() + 1}; for (int length : lengths) { primaryFileClient.create(DATA.getDefaultDataSize()); assertThrows(Exception.class, () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), length), null, null)); } } @Test public void uploadSuccessfulRetry() { primaryFileClient.create(DATA.getDefaultDataSize()); ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy()); clientWithFailure.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.download(os); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test 
public void uploadRangeAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void uploadRangeAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), 
DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void downloadAllNull() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse response = primaryFileClient.downloadWithResponse(stream, null, null, null); byte[] body = stream.toByteArray(); ShareFileDownloadHeaders headers = response.getDeserializedHeaders(); assertArrayEquals(DATA.getDefaultBytes(), body); CoreUtils.isNullOrEmpty(headers.getMetadata()); assertNotNull(headers.getContentLength()); assertNotNull(headers.getContentType()); assertNull(headers.getContentMd5()); assertNull(headers.getContentEncoding()); assertNull(headers.getCacheControl()); assertNull(headers.getContentDisposition()); assertNull(headers.getContentLanguage()); } @ParameterizedTest @ValueSource(ints = {0, 1}) public void downloadEmptyFile(int fileSize) { primaryFileClient.create(fileSize); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); primaryFileClient.download(outStream); byte[] result = outStream.toByteArray(); assertEquals(result.length, fileSize); if (fileSize > 0) { assertEquals(0, result[0]); } } /* This is to test the appropriate integration of DownloadResponse, including setting the correct range values on HttpGetterInfo. */ @Test public void downloadWithRetryRange() { /* We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing a retry per the DownloadRetryOptions. The next request should have the same range header, which was generated from the count and offset values in HttpGetterInfo that was constructed on the initial call to download. We don't need to check the data here, but we want to ensure that the correct range is set each time. 
This will test the correction of a bug that was found which caused HttpGetterInfo to have an incorrect offset when it was constructed in FileClient.download(). */ primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileClient fc2 = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new MockRetryRangeResponsePolicy("bytes=2-6")); ShareFileRange range = new ShareFileRange(2, 6L); DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(3); RuntimeException e = assertThrows(RuntimeException.class, () -> fc2.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileDownloadOptions() .setRange(range).setRetryOptions(options), null, null)); /* Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is NOT thrown because the types would not match. */ assertInstanceOf(IOException.class, e.getCause()); } @Test public void downloadRetryDefault() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileClient failureClient = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new MockFailureResponsePolicy(5)); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); failureClient.download(outStream); String bodyStr = outStream.toString(); assertEquals(bodyStr, DATA.getDefaultText()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void downloadTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(DATA.getDefaultDataSizeLong()); shareFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); 
shareFileClient.download(outStream); String downloadedData = outStream.toString(); assertEquals(downloadedData, DATA.getDefaultText()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void downloadOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); fileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileProperties properties = fileClient.getProperties(); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse response = fileClient.downloadWithResponse(stream, null, null, null); byte[] body = stream.toByteArray(); ShareFileDownloadHeaders headers = response.getDeserializedHeaders(); assertArrayEquals(body, DATA.getDefaultBytes()); CoreUtils.isNullOrEmpty(headers.getMetadata()); assertEquals(headers.getContentLength(), properties.getContentLength()); assertEquals(headers.getContentType(), properties.getContentType()); assertEquals(headers.getContentMd5(), properties.getContentMd5()); assertEquals(headers.getContentEncoding(), properties.getContentEncoding()); assertEquals(headers.getCacheControl(), properties.getCacheControl()); assertEquals(headers.getContentDisposition(), properties.getContentDisposition()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void uploadRange4TB() { long fileSize = 4 * Constants.TB; primaryFileClient.create(fileSize); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()) 
.setOffset(fileSize - DATA.getDefaultDataSizeLong()), null, null); /* Upload to end of file. */ ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(fileSize - DATA.getDefaultDataSizeLong(), fileSize), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); } @ParameterizedTest @ValueSource(longs = { 4 * Constants.MB, 5 * Constants.MB}) public void uploadBufferedRangeGreaterThanMaxPutRange(long length) { primaryFileClient.create(length); ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length)); assertDoesNotThrow(() -> primaryFileClient.upload(data, length, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void uploadRangeTrailingDot() { primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(DATA.getDefaultDataSizeLong()); ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(options, null, null); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(), null, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 200); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadRangeOAuth() { ShareServiceClient oAuthServiceClient = 
getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @ParameterizedTest @MethodSource("bufferedUploadVariousPartitions") public void bufferedUploadVariousPartitions(Long length, Long uploadChunkLength) { primaryFileClient.create(length); ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper .getRandomBuffer(Math.toIntExact(length))); assertNotNull(primaryFileClient.upload(data, length, new ParallelTransferOptions() .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength))); } private 
static Stream<Arguments> bufferedUploadVariousPartitions() {
        // (total length, chunk size) pairs; null chunk size lets the client pick its default.
        return Stream.of(
            Arguments.of(1024L, null),
            Arguments.of(1024L, 1024L),
            Arguments.of(1024L, 256L),
            Arguments.of(4L * Constants.MB, null),
            Arguments.of(4L * Constants.MB, 1024L),
            Arguments.of(20L * Constants.MB, null),
            Arguments.of(20L * Constants.MB, 4L * Constants.MB)
        );
    }

    // A single 20 MB partition exceeds the service's maximum put-range size, so the upload must fail.
    @Test
    public void bufferedUploadErrorPartitionTooBig() {
        long length = 20 * Constants.MB;
        long uploadChunkLength = 20 * Constants.MB;
        primaryFileClient.create(length);
        ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length));
        assertThrows(Exception.class, () -> primaryFileClient.upload(data, length, new ParallelTransferOptions()
            .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength)));
    }

    // Uploading a range to a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
    @Test
    public void uploadDataError() {
        ShareStorageException e = assertThrows(ShareStorageException.class, () ->
            primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
                DATA.getDefaultDataSizeLong()).setOffset(1L), null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }

    // Parallel upload to a non-existent file must fail with 404 RESOURCE_NOT_FOUND.
    @Test
    public void parallelUploadDataError() {
        ShareStorageException e = assertThrows(ShareStorageException.class, () ->
            primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }

    // uploadRange to a non-existent file must fail with 404 RESOURCE_NOT_FOUND.
    @Test
    public void uploadRangeDataError() {
        ShareStorageException e = assertThrows(ShareStorageException.class, () ->
            primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }

    // The client must transparently retry when the pipeline injects a transient failure,
    // leaving the uploaded content intact.
    @Test
    public void uploadDataRetryOnTransientFailure() {
        ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
            primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy());
        primaryFileClient.create(1024);
        clientWithFailure.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        primaryFileClient.downloadWithResponse(os, new ShareFileRange(0, DATA.getDefaultDataSizeLong() - 1), null,
            null, null);
        assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes());
    }

    // clearRange zeroes out the requested bytes of a previously uploaded range.
    @Test
    public void uploadAndClearRange() {
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        primaryFileClient.create(fullInfoString.length());
        primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
        primaryFileClient.clearRange(7);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        primaryFileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null);
        for (byte b : stream.toByteArray()) {
            assertEquals(0, b);
        }
    }

    // clearRangeWithResponse with an explicit offset zeroes bytes starting at that offset.
    @Test
    public void uploadAndClearRangeWithArgs() {
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        primaryFileClient.create(fullInfoString.length());
        primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
        primaryFileClient.clearRangeWithResponse(7, 1, null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, 7L), false, null, null);
        for (byte b : stream.toByteArray()) {
            assertEquals(0, b);
        }
    }

    // clearRange against a trailing-dot file name (2022-11-02 trailing-dot support).
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void clearRangeTrailingDot() {
        ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
        primaryFileClient.create(DATA.getDefaultDataSizeLong());
        FileShareTestHelper.assertResponseStatusCode(primaryFileClient.clearRangeWithResponse(
            DATA.getDefaultDataSizeLong(), 0,
null, null), 201);
    }

    // uploadRange + clearRange through an OAuth (token-intent BACKUP) client.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void uploadAndClearRangeOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        String fileName = generatePathName();
        ShareFileClient fileClient = dirClient.getFileClient(fileName);
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        fileClient.create(fullInfoString.length());
        fileClient.uploadRange(fullInfoData, fullInfoString.length());
        fileClient.clearRange(7);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        fileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null);
        for (byte b : stream.toByteArray()) {
            assertEquals(0, b);
        }
    }

    // Clearing a range past the end of the file must fail with 416 INVALID_RANGE.
    @Test
    public void clearRangeError() {
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        primaryFileClient.create(fullInfoString.length());
        primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
        ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.clearRange(30));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE);
    }

    // Clearing with an offset/length extending past the end of the file must fail with 416 INVALID_RANGE.
    @Test
    public void clearRangeErrorArgs() {
        String fullInfoString = "please clear the range";
        InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
        primaryFileClient.create(fullInfoString.length());
        primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
        ShareStorageException e = assertThrows(ShareStorageException.class, () ->
            primaryFileClient.clearRangeWithResponse(7, 20, null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE);
    }

    // Declared length differing from the actual stream length must raise UnexpectedLengthException.
    @ParameterizedTest
    @MethodSource("uploadDataLengthMismatchSupplier")
    public void uploadDataLengthMismatch(int size, String errMsg) {
        primaryFileClient.create(1024);
        UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () ->
            primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
                DATA.getDefaultInputStream(), size), null, Context.NONE));
        assertTrue(e.getMessage().contains(errMsg));
    }

    // Declared size 6 < actual 7 bytes -> "more than"; 8 > actual -> "less than".
    private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
        return Stream.of(
            Arguments.of(6, "more than"),
            Arguments.of(8, "less than"));
    }

    // Same length-mismatch check for the parallel upload path.
    @ParameterizedTest
    @MethodSource("uploadDataLengthMismatchSupplier")
    public void parallelUploadDataLengthMismatch(int size, String errMsg) {
        primaryFileClient.create(1024);
        UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () ->
            primaryFileClient.upload(DATA.getDefaultInputStream(), size, null));
        assertTrue(e.getMessage().contains(errMsg));
    }

    // Same length-mismatch check for the uploadRange path.
    @ParameterizedTest
    @MethodSource("uploadDataLengthMismatchSupplier")
    public void uploadRangeLengthMismatch(int size, String errMsg) {
        primaryFileClient.create(1024);
        UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () ->
            primaryFileClient.uploadRange(DATA.getDefaultInputStream(), size));
        assertTrue(e.getMessage().contains(errMsg));
    }

    // Downloading from a file that was never created must fail with 404 RESOURCE_NOT_FOUND.
    @Test
    public void downloadDataError() {
        ShareStorageException e = assertThrows(ShareStorageException.class, () ->
            primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileRange(0, 1023L),
                false, null, null));
        FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
    }

    // uploadFromFile with a missing local file must surface NoSuchFileException as the cause.
    @Test
    public void uploadFileDoesNotExist() {
        File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
        if (uploadFile.exists()) {
            assert uploadFile.delete();
        }
        UncheckedIOException e
= assertThrows(UncheckedIOException.class, () -> primaryFileClient.uploadFromFile(uploadFile.getPath())); assertInstanceOf(NoSuchFileException.class, e.getCause()); uploadFile.delete(); } /* * Tests downloading a file using a default client that doesn't have a HttpClient passed to it. */ @LiveOnly @ParameterizedTest @ValueSource(ints = { 0, 20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB }) public void downloadFileBufferCopy(int fileSize) throws IOException { ShareServiceClient shareServiceClient = new ShareServiceClientBuilder() .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()) .buildClient(); ShareFileClient fileClient = shareServiceClient.getShareClient(shareName) .createFile(filePath, fileSize); File file = FileShareTestHelper.getRandomFile(fileSize); fileClient.uploadFromFile(file.toPath().toString()); File outFile = new File(generatePathName() + ".txt"); if (outFile.exists()) { assertTrue(outFile.delete()); } fileClient.downloadToFile(outFile.toPath().toString()); assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize)); shareServiceClient.deleteShare(shareName); outFile.delete(); file.delete(); } @Test public void uploadAndDownloadFileExists() throws IOException { String data = "Download file exists"; File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix)); if (!downloadFile.exists()) { assertTrue(downloadFile.createNewFile()); } primaryFileClient.create(data.length()); primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)), data.length()); UncheckedIOException e = assertThrows(UncheckedIOException.class, () -> primaryFileClient.downloadToFile(downloadFile.getPath())); assertInstanceOf(FileAlreadyExistsException.class, e.getCause()); downloadFile.delete(); } @Test public void uploadAndDownloadToFileDoesNotExist() throws IOException { String data = "Download file DoesNotExist"; File downloadFile = new 
File(String.format("%s/%s.txt", testFolder.getPath(), prefix)); if (downloadFile.exists()) { assertTrue(downloadFile.createNewFile()); } primaryFileClient.create(data.length()); primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)), data.length()); primaryFileClient.downloadToFile(downloadFile.getPath()); Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z"); assertEquals(data, scanner.next()); scanner.close(); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), downloadFile.getName()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void uploadRangePreserveFileLastWrittenOn() { FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE}; for (FileLastWrittenMode mode : modes) { primaryFileClient.create(Constants.KB); ShareFileProperties initialProps = primaryFileClient.getProperties(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions( new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB).setLastWrittenMode(mode), null, null); ShareFileProperties resultProps = primaryFileClient.getProperties(); if (mode.equals(FileLastWrittenMode.PRESERVE)) { assertEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties() .getFileLastWriteTime()); } else { assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties() .getFileLastWriteTime()); } } } @Disabled("the groovy test was not testing this test properly. need to investigate this test further.") @ParameterizedTest @ValueSource(strings = { "", "ü1ü" /* Something that needs to be url encoded. 
*/ })
    public void uploadRangeFromURL(String pathSuffix) {
        primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient();
        primaryFileClient.create(1024);
        String data = "The quick brown fox jumps over the lazy dog";
        int sourceOffset = 5;
        int length = 5;
        int destinationOffset = 0;
        primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length());
        // Read-only SAS scoped to the source file so the service can fetch the copy source.
        StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
            ENVIRONMENT.getPrimaryAccount().getConnectionString());
        String sasToken = new ShareServiceSasSignatureValues()
            .setExpiryTime(testResourceNamer.now().plusDays(1))
            .setPermissions(new ShareFileSasPermission().setReadPermission(true))
            .setShareName(primaryFileClient.getShareName())
            .setFilePath(primaryFileClient.getFilePath())
            .generateSasQueryParameters(credential)
            .encode();
        ShareFileClient client = fileBuilderHelper(shareName, "destination" + pathSuffix)
            .endpoint(primaryFileClient.getFileUrl().toString())
            .buildFileClient();
        client.create(1024);
        client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileClient.getFileUrl() + "?"
            + sasToken);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        client.download(stream);
        String result = new String(stream.toByteArray());
        // The copied window [sourceOffset, sourceOffset+length) must land at destinationOffset.
        for (int i = 0; i < length; i++) {
            assertEquals(result.charAt(destinationOffset + i), data.charAt(sourceOffset + i));
        }
    }

    // uploadRangeFromUrl through OAuth-backed clients; verifies download headers and the copied byte.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
    @Test
    public void uploadRangeFromURLOAuth() {
        ShareServiceClient oAuthServiceClient = getOAuthServiceClientSharedKey(new ShareServiceClientBuilder()
            .shareTokenIntent(ShareTokenIntent.BACKUP));
        ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
            .getDirectoryClient(generatePathName());
        dirClient.create();
        String fileName = generatePathName();
        ShareFileClient fileClient = dirClient.getFileClient(fileName);
        fileClient.create(1024);
        String data = "The quick brown fox jumps over the lazy dog";
        int sourceOffset = 5;
        int length = 5;
        int destinationOffset = 0;
        fileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length());
        StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
            ENVIRONMENT.getPrimaryAccount().getConnectionString());
        String sasToken = new ShareServiceSasSignatureValues()
            .setExpiryTime(testResourceNamer.now().plusDays(1))
            .setPermissions(new ShareFileSasPermission().setReadPermission(true))
            .setShareName(fileClient.getShareName())
            .setFilePath(fileClient.getFilePath())
            .generateSasQueryParameters(credential)
            .encode();
        String fileNameDest = generatePathName();
        ShareFileClient fileClientDest = dirClient.getFileClient(fileNameDest);
        fileClientDest.create(1024);
        Response<ShareFileUploadRangeFromUrlInfo> uploadResponse =
            fileClientDest.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
                fileClient.getFileUrl() + "?"
+ sasToken, null, null);
        ByteArrayOutputStream stream = new ByteArrayOutputStream();
        ShareFileDownloadResponse downloadResponse = fileClientDest.downloadWithResponse(stream, null, null, null,
            null);
        ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
        FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
        assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
        assertEquals(headers.getContentLength(), 1024);
        assertNotNull(headers.getETag());
        assertNotNull(headers.getLastModified());
        assertNotNull(headers.getFilePermissionKey());
        assertNotNull(headers.getFileAttributes());
        assertNotNull(headers.getFileLastWriteTime());
        assertNotNull(headers.getFileCreationTime());
        assertNotNull(headers.getFileChangeTime());
        assertNotNull(headers.getFileParentId());
        assertNotNull(headers.getFileId());
        // 117 == 'u', the source byte at sourceOffset (5) copied to destination offset 0.
        assertEquals(stream.toByteArray()[0], 117);
    }

    // For uploadRangeFromUrl, PRESERVE must keep the destination's last-write time; NOW must refresh it.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
    @Test
    public void uploadRangeFromUrlPreserveFileLastWrittenOn() {
        FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE};
        primaryFileClient.create(Constants.KB);
        ShareFileClient destinationClient = shareClient.getFileClient(generatePathName());
        destinationClient.create(Constants.KB);
        ShareFileProperties initialProps = destinationClient.getProperties();
        primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
            Constants.KB);
        StorageSharedKeyCredential credential = StorageSharedKeyCredential
            .fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString());
        String sasToken = new ShareServiceSasSignatureValues()
            .setExpiryTime(testResourceNamer.now().plusDays(1))
            .setPermissions(new ShareFileSasPermission().setReadPermission(true))
            .setShareName(primaryFileClient.getShareName())
            .setFilePath(primaryFileClient.getFilePath())
            .generateSasQueryParameters(credential)
            .encode();
        for (FileLastWrittenMode mode : modes) {
            destinationClient.uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(Constants.KB,
                primaryFileClient.getFileUrl() + "?" + sasToken).setLastWrittenMode(mode), null, null);
            ShareFileProperties resultProps = destinationClient.getProperties();
            if (mode.equals(FileLastWrittenMode.PRESERVE)) {
                assertTrue(FileShareTestHelper.compareDatesWithPrecision(
                    initialProps.getSmbProperties().getFileLastWriteTime(),
                    resultProps.getSmbProperties().getFileLastWriteTime()));
            } else {
                assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                    .getFileLastWriteTime());
            }
        }
    }

    // uploadRangeFromUrl between two trailing-dot file names, trailing-dot support enabled.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void uploadRangeFromUrlTrailingDot() {
        shareClient = getShareClient(shareName, true, true);
        ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient();
        ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + ".");
        sourceClient.create(Constants.KB);
        ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + ".");
        destinationClient.create(Constants.KB);
        sourceClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
            Constants.KB);
        ShareFileSasPermission permissions = new ShareFileSasPermission()
            .setReadPermission(true)
            .setWritePermission(true)
            .setCreatePermission(true)
            .setDeletePermission(true);
        OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
        ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
        String sasToken = shareClient.generateSas(sasValues);
        Response<ShareFileUploadRangeFromUrlInfo> res = destinationClient.uploadRangeFromUrlWithResponse(
            new ShareFileUploadRangeFromUrlOptions(Constants.KB, sourceClient.getFileUrl() + "?"
                + sasToken), null, null);
        FileShareTestHelper.assertResponseStatusCode(res, 201);
    }

    // Same scenario with trailing-dot support disabled: the copy must fail.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
    @Test
    public void uploadRangeFromUrlTrailingDotFail() {
        shareClient = getShareClient(shareName, true, false);
        ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient();
        ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + ".");
        sourceClient.create(DATA.getDefaultDataSizeLong());
        ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + ".");
        destinationClient.create(DATA.getDefaultDataSizeLong());
        sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
        assertThrows(ShareStorageException.class, () -> destinationClient
            .uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(DATA.getDefaultDataSizeLong(),
                sourceClient.getFileUrl()), null, null));
    }

    // openInputStream bounded by a ShareFileRange reads exactly the 6 bytes in [5, 10].
    @Test
    public void openInputStreamWithRange() throws IOException {
        primaryFileClient.create(1024);
        ShareFileRange shareFileRange = new ShareFileRange(5L, 10L);
        byte[] dataBytes = "long test string".getBytes(StandardCharsets.UTF_8);
        ByteArrayInputStream inputStreamData = new ByteArrayInputStream(dataBytes);
        primaryFileClient.upload(inputStreamData, dataBytes.length, null);
        int totalBytesRead = 0;
        StorageFileInputStream stream = primaryFileClient.openInputStream(shareFileRange);
        while (stream.read() != -1) {
            totalBytesRead++;
        }
        stream.close();
        assertEquals(6, totalBytesRead);
    }

    @ParameterizedTest
    @ValueSource(strings = { "", "ü1ü" /* Something that needs to be url encoded.
*/ }) public void startCopy(String pathSuffix) { primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient(); primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); } @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly, boolean setArchiveAttribute, PermissionCopyModeType permissionType) { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); if (setFilePermissionKey) { smbProperties.setFilePermissionKey(filePermissionKey); } SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, smbProperties, setFilePermission ? 
FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute, null, null, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void startCopyTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + "."); sourceClient.create(1024); ShareFileClient destClient = shareClient.getFileClient(generatePathName() + "."); destClient.create(1024); byte[] data = FileShareTestHelper.getRandomBuffer(Constants.KB); ByteArrayInputStream inputStream = new ByteArrayInputStream(data); sourceClient.uploadRange(inputStream, Constants.KB); SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null); poller.waitForCompletion(); assertEquals(poller.poll().getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void startCopyTrailingDotFail() { shareClient = getShareClient(shareName, true, false); ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + "."); sourceClient.create(1024); ShareFileClient destClient = shareClient.getFileClient(generatePathName() + "."); destClient.create(1024); sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); ShareStorageException e = assertThrows(ShareStorageException.class, () -> destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap between" + " the time subscribed and the time we start observing events.") @Test public void startCopyError() { 
primaryFileClient.create(1024); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy("some url", testMetadata, null); ShareStorageException e = assertThrows(ShareStorageException.class, poller::waitForCompletion); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.INVALID_HEADER_VALUE); } @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly, boolean setArchiveAttribute, PermissionCopyModeType permissionType) { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); if (setFilePermissionKey) { smbProperties.setFilePermissionKey(filePermissionKey); } ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(setFilePermission ? 
FILE_PERMISSION : null) .setIgnoreReadOnly(ignoreReadOnly) .setArchiveAttribute(setArchiveAttribute) .setPermissionCopyModeType(permissionType); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @Test public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); ShareFileCopyOptions options = new ShareFileCopyOptions() .setIgnoreReadOnly(true) .setArchiveAttribute(true); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsFilePermission() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE); smbProperties .setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setNtfsFileAttributes(ntfs); ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(FILE_PERMISSION) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties(); assertNotNull(pollResponse.getValue().getCopyId()); 
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
        FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
            smbProperties.getFileCreationTime());
        FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
            smbProperties.getFileLastWriteTime());
        assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
    }

    // Copy with OVERRIDE mode must apply the supplied file-change time.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
    @Test
    public void startCopyWithOptionsChangeTime() {
        ShareFileInfo client = primaryFileClient.create(1024); // return value unused
        String sourceURL = primaryFileClient.getFileUrl();
        smbProperties.setFileChangeTime(testResourceNamer.now());
        ShareFileCopyOptions options = new ShareFileCopyOptions()
            .setSmbProperties(smbProperties)
            .setFilePermission(FILE_PERMISSION)
            .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
        SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
        PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
        assertNotNull(pollResponse.getValue().getCopyId());
        assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
        FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
            primaryFileClient.getProperties().getSmbProperties().getFileChangeTime());
    }

    // Copy with smb properties supplied via a permission key instead of a raw permission string.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
    @Test
    public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
        primaryFileClient.create(1024);
        String sourceURL = primaryFileClient.getFileUrl();
        String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
        EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
        smbProperties
            .setFileCreationTime(testResourceNamer.now())
            .setFileLastWriteTime(testResourceNamer.now())
            .setNtfsFileAttributes(ntfs)
            .setFilePermissionKey(filePermissionKey);
        ShareFileCopyOptions options = new ShareFileCopyOptions()
            .setSmbProperties(smbProperties)
            .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
        SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
        PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
        FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties();
        assertNotNull(pollResponse.getValue().getCopyId());
        assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
        FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
            smbProperties.getFileCreationTime());
        FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
            smbProperties.getFileLastWriteTime());
        assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
    }

    // A copy whose destination holds a valid lease must succeed when the lease id is supplied.
    @Test
    public void startCopyWithOptionLease() {
        primaryFileClient.create(1024);
        String sourceURL = primaryFileClient.getFileUrl();
        String leaseId = createLeaseClient(primaryFileClient).acquireLease();
        ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
        ShareFileCopyOptions options = new ShareFileCopyOptions()
            .setDestinationRequestConditions(conditions);
        SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
        PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
        assertNotNull(pollResponse.getValue().getCopyId());
        assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    }

    // Supplying a random (non-held) lease id must make the copy fail.
    @Test
    public void startCopyWithOptionsInvalidLease() {
        primaryFileClient.create(1024);
        String sourceURL = primaryFileClient.getFileUrl();
        String leaseId = testResourceNamer.randomUuid();
        ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
        ShareFileCopyOptions options = new ShareFileCopyOptions()
            .setDestinationRequestConditions(conditions);
        assertThrows(ShareStorageException.class, () -> primaryFileClient.beginCopy(sourceURL,
            options, null));
    }

    // Metadata supplied in the copy options must not prevent a successful copy.
    @Test
    public void startCopyWithOptionsMetadata() {
        primaryFileClient.create(1024);
        String sourceURL = primaryFileClient.getFileUrl();
        ShareFileCopyOptions options = new ShareFileCopyOptions()
            .setMetadata(testMetadata);
        SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
        PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
        assertNotNull(pollResponse.getValue().getCopyId());
        assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    }

    // setSmbPropertiesToCopy must carry the SOURCE file's original smb properties to the destination.
    @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
    @Test
    public void startCopyWithOptionsWithOriginalSmbProperties() {
        primaryFileClient.create(1024);
        ShareFileProperties initialProperties = primaryFileClient.getProperties();
        OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
        OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
        OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
        EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
        String sourceURL = primaryFileClient.getFileUrl();
        String leaseId = createLeaseClient(primaryFileClient).acquireLease();
        ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
        CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
            .setCreatedOn(true)
            .setLastWrittenOn(true)
            .setChangedOn(true)
            .setFileAttributes(true);
        ShareFileCopyOptions options = new ShareFileCopyOptions()
            .setDestinationRequestConditions(conditions)
            .setSmbPropertiesToCopy(list);
        SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
        PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
        ShareFileProperties resultProperties = primaryFileClient.getProperties();
        assertNotNull(pollResponse.getValue().getCopyId());
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getSmbProperties() .getFileCreationTime()); FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getSmbProperties() .getFileLastWriteTime()); FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getSmbProperties() .getFileChangeTime()); assertEquals(fileAttributes, resultProperties.getSmbProperties().getNtfsFileAttributes()); } @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn, boolean fileAttributes) { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE); CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList() .setCreatedOn(createdOn) .setLastWrittenOn(lastWrittenOn) .setChangedOn(changedOn) .setFileAttributes(fileAttributes); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFileChangeTime(testResourceNamer.now()) .setNtfsFileAttributes(ntfs); ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(FILE_PERMISSION) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE) .setSmbPropertiesToCopy(list); assertThrows(IllegalArgumentException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void startCopyOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) 
.getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient sourceClient = dirClient.getFileClient(generatePathName());
    sourceClient.create(DATA.getDefaultDataSizeLong());
    ShareFileClient destClient = dirClient.getFileClient(generatePathName());
    destClient.create(DATA.getDefaultDataSizeLong());
    sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    String sourceURL = sourceClient.getFileUrl();
    SyncPoller<ShareFileCopyInfo, Void> poller = sourceClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
}

// Aborting a copy that has already completed fails with ShareStorageException.
@Test
public void abortCopy() {
    int fileSize = Constants.MB;
    byte[] bytes = new byte[fileSize];
    ByteArrayInputStream data = new ByteArrayInputStream(bytes);
    ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    primaryFileClient.create(fileSize);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
    dest.create(fileSize);
    SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    // The copy is expected to have finished by now, so abort should fail.
    assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId()));
}

// Same as abortCopy but passing the destination's active lease in the request conditions.
@Test
public void abortCopyLease() {
    int fileSize = Constants.MB;
    byte[] bytes = new byte[fileSize];
    ByteArrayInputStream data = new ByteArrayInputStream(bytes);
    ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    primaryFileClient.create(fileSize);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
    dest.create(fileSize);
    String leaseId = createLeaseClient(primaryFileClient).acquireLease();
    ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
    SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(
        sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    assertThrows(ShareStorageException.class,
        () -> dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(), requestConditions, null, null));
}

// A made-up lease id on the destination causes the copy/abort sequence to fail.
@Test
public void abortCopyInvalidLease() {
    int fileSize = Constants.MB;
    byte[] bytes = new byte[fileSize];
    ByteArrayInputStream data = new ByteArrayInputStream(bytes);
    ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    primaryFileClient.create(fileSize);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
    dest.create(fileSize);
    String leaseId = testResourceNamer.randomUuid();
    ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId);
    assertThrows(ShareStorageException.class, () -> {
        SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(
            sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null);
        PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
        assertNotNull(pollResponse);
        assertNotNull(pollResponse.getValue());
        dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(), requestConditions, null, null);
    });
}

// Abort-copy behavior for file names with a trailing dot (trailing-dot support enabled on the client).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void abortCopyTrailingDot() {
    ByteArrayInputStream data = new ByteArrayInputStream(new byte[Constants.MB]);
    String fileName = generatePathName() + ".";
    ShareFileClient primaryFileClient = getFileClient(shareName, fileName, true, null);
    primaryFileClient.create(Constants.MB);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, fileName).buildFileClient();
    dest.create(Constants.MB);
    SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId()));
}

// Abort-copy through an OAuth (token-credential) client created with backup intent.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void abortCopyOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient sourceClient = dirClient.getFileClient(fileName);
    sourceClient.create(DATA.getDefaultDataSizeLong());
    sourceClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null);
    String sourceURL = sourceClient.getFileUrl();
    ShareFileClient destClient = dirClient.getFileClient(generatePathName());
    destClient.create(DATA.getDefaultDataSizeLong());
    SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    assertThrows(ShareStorageException.class, () -> destClient.abortCopy(pollResponse.getValue().getCopyId()));
}

// Aborting with a copy id that was never issued fails.
@Test
public void abortCopyError() {
assertThrows(ShareStorageException.class, () -> primaryFileClient.abortCopy("randomId"));
}

// Deleting an existing file returns 202 Accepted.
@Test
public void deleteFile() {
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteWithResponse(null, null), 202);
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void deleteFileTrailingDot() {
    ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    shareFileClient.create(1024);
    FileShareTestHelper.assertResponseStatusCode(shareFileClient.deleteWithResponse(null, null), 202);
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void deleteFileOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    fileClient.create(Constants.KB);
    FileShareTestHelper.assertResponseStatusCode(fileClient.deleteWithResponse(null, null), 202);
}

// Deleting a file that was never created yields 404 ResourceNotFound.
@Test
public void deleteFileError() {
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.deleteWithResponse(null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}

@Test
public void deleteIfExistsFile() {
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteIfExistsWithResponse(null, null, null), 202);
}

@Test
public void deleteIfExistsFileMin() {
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    primaryFileClient.deleteIfExists();
}

// deleteIfExists on a missing file reports false with a 404 status and does not throw.
@Test
public void deleteIfExistsFileThatDoesNotExist() {
    ShareFileClient client = shareClient.getFileClient(generateShareName());
    Response<Boolean> response = client.deleteIfExistsWithResponse(null, null, null);
    assertFalse(response.getValue());
    FileShareTestHelper.assertResponseStatusCode(response, 404);
    assertFalse(client.exists());
}

// First deleteIfExists returns true, the second returns false.
@Test
public void deleteIfExistsFileThatWasAlreadyDeleted() {
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    assertTrue(primaryFileClient.deleteIfExists());
    assertFalse(primaryFileClient.deleteIfExists());
}

// getProperties populates ETag, last-modified, and the full set of SMB properties.
@Test
public void getProperties() {
    primaryFileClient.create(1024);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    Response<ShareFileProperties> resp = primaryFileClient.getPropertiesWithResponse(null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getETag());
    assertNotNull(resp.getValue().getLastModified());
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void getPropertiesTrailingDot() {
    ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    shareFileClient.create(1024);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    Response<ShareFileProperties> resp = shareFileClient.getPropertiesWithResponse(null, null);
FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getETag());
    assertNotNull(resp.getValue().getLastModified());
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}

// Properties retrieved through an OAuth client match the values returned at creation time.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void getPropertiesOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    ShareFileInfo createInfo = fileClient.create(Constants.KB);
    ShareFileProperties properties = fileClient.getProperties();
    assertEquals(createInfo.getETag(), properties.getETag());
    assertEquals(createInfo.getLastModified(), properties.getLastModified());
    assertEquals(createInfo.getSmbProperties().getFilePermissionKey(),
        properties.getSmbProperties().getFilePermissionKey());
    assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(),
        properties.getSmbProperties().getNtfsFileAttributes());
    assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(),
        properties.getSmbProperties().getFileLastWriteTime());
    assertEquals(createInfo.getSmbProperties().getFileCreationTime(),
        properties.getSmbProperties().getFileCreationTime());
    assertEquals(createInfo.getSmbProperties().getFileChangeTime(),
        properties.getSmbProperties().getFileChangeTime());
    assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId());
    assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId());
}

// getProperties on a non-existent file surfaces a ResourceNotFound error.
@Test
public void getPropertiesError() {
    ShareStorageException ex = assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties());
    assertTrue(ex.getMessage().contains("ResourceNotFound"));
}

// setProperties using a file-permission key created ahead of time on the share.
@Test
public void setHttpHeadersFpk() {
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFilePermissionKey(filePermissionKey);
    Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
        null, null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getETag());
    assertNotNull(resp.getValue().getLastModified());
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}

// setProperties passing the raw file permission instead of a permission key.
@Test
public void setHttpHeadersFp() {
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties,
        FILE_PERMISSION, null, null);
    FileShareTestHelper.assertResponseStatusCode(resp, 200);
    assertNotNull(resp.getValue().getETag());
    assertNotNull(resp.getValue().getLastModified());
    assertNotNull(resp.getValue().getSmbProperties());
    assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey());
    assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes());
    assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime());
    assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime());
    assertNotNull(resp.getValue().getSmbProperties().getParentId());
    assertNotNull(resp.getValue().getSmbProperties().getFileId());
}

// An explicitly set file-change time round-trips (compared with precision tolerance).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void setHttpHeadersChangeTime() {
    primaryFileClient.create(512);
    OffsetDateTime changeTime = testResourceNamer.now();
    primaryFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null);
    FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties()
        .getFileChangeTime(), changeTime);
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setHttpHeadersTrailingDot() {
    ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    shareFileClient.create(1024);
    OffsetDateTime changeTime = testResourceNamer.now();
    shareFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null);
    FileShareTestHelper.compareDatesWithPrecision(shareFileClient.getProperties().getSmbProperties()
        .getFileChangeTime(), changeTime);
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setHttpHeadersOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    String dirName = generatePathName();
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName);
    dirClient.create();
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    fileClient.create(Constants.KB);
    httpHeaders = new ShareFileHttpHeaders()
        .setContentType("application/octet-stream")
        .setContentDisposition("attachment")
        .setCacheControl("no-transform")
        .setContentEncoding("gzip")
        .setContentLanguage("en");
    Response<ShareFileInfo> res = fileClient.setPropertiesWithResponse(Constants.KB, httpHeaders, null, null,
        null, null);
    ShareFileProperties properties = fileClient.getProperties();
    FileShareTestHelper.assertResponseStatusCode(res, 200);
    assertNotNull(res.getValue().getETag());
    assertEquals(res.getValue().getETag(), res.getHeaders().getValue(HttpHeaderName.ETAG));
    assertEquals(properties.getContentType(), "application/octet-stream");
    assertEquals(properties.getContentDisposition(), "attachment");
    assertEquals(properties.getCacheControl(), "no-transform");
    assertEquals(properties.getContentEncoding(), "gzip");
    assertNull(properties.getContentMd5());
}

// A negative content length is rejected by the service with 400 OutOfRangeInput.
@Test
public void setHttpHeadersError() {
    primaryFileClient.createWithResponse(1024, null, null, null, null, null, null);
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.setPropertiesWithResponse(-1, null, null, null, null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT);
}

// setMetadata replaces the metadata that was set at creation time.
@Test
public void setMetadata() {
    primaryFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareFileProperties getPropertiesBefore = primaryFileClient.getProperties();
    Response<ShareFileMetadataInfo> setPropertiesResponse = primaryFileClient
        .setMetadataWithResponse(updatedMetadata, null, null);
    ShareFileProperties getPropertiesAfter = primaryFileClient.getProperties();
    assertEquals(testMetadata, getPropertiesBefore.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void setMetadataTrailingDot() {
    ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    shareFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null);
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareFileProperties getPropertiesBefore = shareFileClient.getProperties();
    Response<ShareFileMetadataInfo> setPropertiesResponse =
        shareFileClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareFileProperties getPropertiesAfter = shareFileClient.getProperties();
    assertEquals(testMetadata, getPropertiesBefore.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void setMetadataOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    Map<String, String> updatedMetadata = Collections.singletonMap("update", "value");
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    fileClient.createWithResponse(Constants.KB, null, null, null, testMetadata, null, null);
    ShareFileProperties getPropertiesBefore = fileClient.getProperties();
    Response<ShareFileMetadataInfo> setPropertiesResponse =
        fileClient.setMetadataWithResponse(updatedMetadata, null, null);
    ShareFileProperties getPropertiesAfter = fileClient.getProperties();
    assertEquals(testMetadata, getPropertiesBefore.getMetadata());
    FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200);
    assertEquals(updatedMetadata, getPropertiesAfter.getMetadata());
}

// An empty metadata key is invalid and surfaces as 400 EmptyMetadataKey.
@Test
public void setMetadataError() {
    primaryFileClient.create(1024);
    Map<String, String> errorMetadata = Collections.singletonMap("", "value");
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.setMetadataWithResponse(errorMetadata, null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY);
}

// A fully-written 1 KB file reports a single range covering bytes 0-1023.
@Test
public void listRanges() throws IOException {
    String fileName = generatePathName();
    primaryFileClient.create(1024);
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    primaryFileClient.listRanges().forEach(it -> {
        assertEquals(0, it.getStart());
        assertEquals(1023, it.getEnd());
    });
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void listRangesTrailingDot() throws IOException {
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(1024);
    String fileName = generatePathName() + ".";
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    primaryFileClient.listRanges().forEach(it -> {
        assertEquals(0, it.getStart());
        assertEquals(1023, it.getEnd());
    });
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

// Restricting listRanges to a sub-range trims the reported range accordingly.
@Test
public void listRangesWithRange() throws IOException {
    String fileName = generatePathName();
    primaryFileClient.create(1024);
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
        assertEquals(0, it.getStart());
        assertEquals(511, it.getEnd());
    });
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

// Ranges can be listed from a share snapshot.
@Test
public void listRangesSnapshot() throws IOException {
    String fileName = generatePathName();
    primaryFileClient.create(1024);
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    primaryFileClient = fileBuilderHelper(shareName, filePath)
        .snapshot(snapInfo.getSnapshot())
        .buildFileClient();
    primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
        assertEquals(0, it.getStart());
        assertEquals(511, it.getEnd());
    });
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

// Listing ranges against a snapshot id that does not exist fails.
@Test
public void listRangesSnapshotFail() throws IOException {
    String fileName = generateShareName();
    primaryFileClient.create(1024);
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    primaryFileClient = fileBuilderHelper(shareName, filePath)
        .snapshot("2020-08-07T16:58:02.0000000Z")
        .buildFileClient();
    assertThrows(ShareStorageException.class,
        () -> primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> {
            assertEquals(0, it.getStart());
            assertEquals(511, it.getEnd());
        }));
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void listRangesOAuth() throws IOException {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient
fileClient = dirClient.getFileClient(fileName);
    fileClient.create(Constants.KB);
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    fileClient.uploadFromFile(uploadFile);
    fileClient.listRanges().forEach(it -> {
        assertEquals(0, it.getStart());
        assertEquals(1023, it.getEnd());
    });
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

// Range-diff between the current file and an earlier snapshot: updated and cleared ranges are
// reported separately and compared element-wise against the expected lists.
// NOTE(review): the @MethodSource value below is truncated in this file (closing quote and supplier
// method reference are missing) — restore it from the original source; it will not compile as-is.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear,
    List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) {
    primaryFileClient.create(4 * Constants.MB);
    primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(4 * Constants.MB)),
        4 * Constants.MB);
    String snapshotId = primaryFileServiceClient.getShareClient(primaryFileClient.getShareName())
        .createSnapshot()
        .getSnapshot();
    rangesToUpdate.forEach(it -> {
        long size = it.getEnd() - it.getStart() + 1;
        primaryFileClient.uploadRangeWithResponse(
            new ShareFileUploadRangeOptions(new ByteArrayInputStream(
                FileShareTestHelper.getRandomBuffer((int) size)), size).setOffset(it.getStart()), null, null);
    });
    rangesToClear.forEach(it -> {
        long size = it.getEnd() - it.getStart() + 1;
        primaryFileClient.clearRangeWithResponse(size, it.getStart(), null, null);
    });
    ShareFileRangeList rangeDiff = primaryFileClient.listRangesDiff(snapshotId);
    assertEquals(expectedRanges.size(), rangeDiff.getRanges().size());
    assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size());
    for (int i = 0; i < expectedRanges.size(); i++) {
        FileRange actualRange = rangeDiff.getRanges().get(i);
        FileRange expectedRange = expectedRanges.get(i);
        assertEquals(expectedRange.getStart(), actualRange.getStart());
        assertEquals(expectedRange.getEnd(), actualRange.getEnd());
    }
    for (int i = 0; i < expectedClearRanges.size(); i++) {
        ClearRange actualRange = rangeDiff.getClearRanges().get(i);
        ClearRange expectedRange = expectedClearRanges.get(i);
        assertEquals(expectedRange.getStart(), actualRange.getStart());
        assertEquals(expectedRange.getEnd(), actualRange.getEnd());
    }
}

// Same range-diff flow exercised through an OAuth client.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void listRangesDiffOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    fileClient.create(Constants.KB);
    fileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
        Constants.KB);
    String snapshotId = primaryFileServiceClient.getShareClient(fileClient.getShareName())
        .createSnapshot()
        .getSnapshot();
    List<FileRange> rangesToUpdate = FileShareTestHelper.createFileRanges();
    List<FileRange> rangesToClear = FileShareTestHelper.createFileRanges();
    List<FileRange> expectedRanges = FileShareTestHelper.createFileRanges();
    List<FileRange> expectedClearRanges = FileShareTestHelper.createFileRanges();
    rangesToUpdate.forEach(it -> {
        long size = it.getEnd() - it.getStart() + 1;
        fileClient.uploadWithResponse(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) size)),
            size, it.getStart(), null, null);
    });
    rangesToClear.forEach(it -> {
        long size = it.getEnd() - it.getStart() + 1;
        fileClient.clearRangeWithResponse(size, it.getStart(), null, null);
    });
    ShareFileRangeList rangeDiff = fileClient.listRangesDiff(snapshotId);
    assertEquals(expectedRanges.size(), rangeDiff.getRanges().size());
    assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size());
    for (int i = 0; i < expectedRanges.size(); i++) {
        FileRange actualRange = rangeDiff.getRanges().get(i);
        FileRange expectedRange = expectedRanges.get(i);
        assertEquals(expectedRange.getStart(), actualRange.getStart());
        assertEquals(expectedRange.getEnd(), actualRange.getEnd());
    }
    for (int i = 0; i < expectedClearRanges.size(); i++) {
        ClearRange actualRange = rangeDiff.getClearRanges().get(i);
        // NOTE(review): expectedClearRanges is a List<FileRange> here, unlike listRangesDiff above
        // which uses ClearRange — confirm this asymmetry is intentional.
        FileRange expectedRange = expectedClearRanges.get(i);
        assertEquals(expectedRange.getStart(), actualRange.getStart());
        assertEquals(expectedRange.getEnd(), actualRange.getEnd());
    }
}

// Diff restricted to an explicit byte range only reports ranges inside that window.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@Test
public void listRangesDiffWithRange() throws IOException {
    String fileName = generateShareName();
    primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
        DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null);
    FileRange range = primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025, 1026L)),
        null, null).getValue().getRanges().get(0);
    assertEquals(1025, range.getStart());
    assertEquals(1026, range.getEnd());
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

// Diff succeeds when a valid lease id is supplied in the request conditions.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10")
@Test
public void listRangesDiffLease() throws IOException {
    String fileName = generateShareName();
    primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null);
    String leaseId = createLeaseClient(primaryFileClient).acquireLease();
    FileRange range = primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions(snapInfo.getSnapshot())
            .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)), null, null)
        .getValue().getRanges().get(0);
    assertEquals(1024, range.getStart());
    assertEquals(1030, range.getEnd());
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void listRangesDiffTrailingDot() throws IOException {
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    String fileNameWithDot = generateShareName() + ".";
    primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileNameWithDot);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
        DATA.getDefaultDataSizeLong()).setOffset(1024L);
    primaryFileClient.uploadRangeWithResponse(options, null, null);
    FileRange range = primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025L, 1026L)),
        null, null).getValue().getRanges().get(0);
    assertEquals(1025, range.getStart());
    assertEquals(1026, range.getEnd());
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileNameWithDot);
}

// Diff with a lease id that does not match any lease fails.
@Test
public void listRangesDiffLeaseFail() throws IOException {
    String fileName = generateShareName();
    primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong());
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    ShareSnapshotInfo snapInfo = shareClient.createSnapshot();
    primaryFileClient.uploadWithResponse(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), 1024L,
        null, null);
    assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions(snapInfo.getSnapshot())
            .setRequestConditions(new ShareRequestConditions()
                .setLeaseId(testResourceNamer.randomUuid())), null, null).getValue().getRanges().get(0));
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

// Diff against a snapshot id that does not exist fails.
@Test
public void listRangesDiffFail() throws IOException {
    String fileName = generateShareName();
    primaryFileClient.create(1024);
    String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName);
    primaryFileClient.uploadFromFile(uploadFile);
    assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse(
        new ShareFileListRangesDiffOptions("2020-08-07T16:58:02.0000000Z"), null, null).getValue().getRanges()
        .get(0));
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName);
}

// A newly created file has no open handles.
@Test
public void listHandles() {
    primaryFileClient.create(1024);
    assertEquals(0, primaryFileClient.listHandles().stream().count());
}

@Test
public void listHandlesWithMaxResult() {
    primaryFileClient.create(1024);
    assertEquals(0, primaryFileClient.listHandles(2, null, null).stream().count());
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void listHandlesTrailingDot() {
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(1024);
    assertEquals(0, primaryFileClient.listHandles().stream().count());
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void listHandlesOAuth() {
    ShareServiceClient oAuthServiceClient =
        getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient fileClient = dirClient.getFileClient(generatePathName());
    fileClient.create(Constants.KB);
    assertEquals(0, fileClient.listHandles().stream().count());
}

// Playback-only: relies on a pre-recorded share ("myshare") that has an open WRITE handle.
@PlaybackOnly
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03")
@Test
public void listHandlesAccessRights() {
    ShareClient shareClient = primaryFileServiceClient.getShareClient("myshare");
    ShareDirectoryClient directoryClient = shareClient.getDirectoryClient("mydirectory");
    ShareFileClient fileClient = directoryClient.getFileClient("myfile");
    List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList());
    assertEquals(list.get(0).getAccessRights().get(0), ShareFileHandleAccessRights.WRITE);
}

// Force-closing a handle id that is not open reports zero closed and zero failed handles.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03")
@Test
public void forceCloseHandleMin() {
    primaryFileClient.create(512);
    CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1");
    assertEquals(0, handlesClosedInfo.getClosedHandles());
    assertEquals(0, handlesClosedInfo.getFailedHandles());
}

// A malformed handle id is rejected by the service.
@Test
public void forceCloseHandleInvalidHandleID() {
    primaryFileClient.create(512);
    assertThrows(ShareStorageException.class, () -> primaryFileClient.forceCloseHandle("invalidHandleId"));
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void forceCloseHandleTrailingDot() {
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(512);
    CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1");
    assertEquals(0, handlesClosedInfo.getClosedHandles());
    assertEquals(0, handlesClosedInfo.getFailedHandles());
}

@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void forceCloseHandleOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new
ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(512); CloseHandlesInfo handlesClosedInfo = fileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07") @Test public void forceCloseAllHandlesMin() { primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseAllHandles(null, null); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameMin() { primaryFileClient.create(512); assertNotNull(primaryFileClient.rename(generatePathName())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(strings = {"\u200B", "\u200C", "\u200D", "\uFEFF"}) @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameWithResponse() { primaryFileClient.create(512); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()), null, null); ShareFileClient renamedClient = resp.getValue(); assertNotNull(renamedClient.getProperties()); assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12") @Test public void renameSasToken() { ShareFileSasPermission permissions = new ShareFileSasPermission() .setReadPermission(true) .setWritePermission(true) .setCreatePermission(true) .setDeletePermission(true); OffsetDateTime expiryTime = 
testResourceNamer.now().plusDays(1); ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions); String sas = shareClient.generateSas(sasValues); ShareFileClient client = getFileClient(sas, primaryFileClient.getFileUrl()); primaryFileClient.create(1024); String fileName = generatePathName(); ShareFileClient destClient = client.rename(fileName); assertNotNull(destClient.getProperties()); assertEquals(fileName, destClient.getFilePath()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDifferentDirectory() { primaryFileClient.create(512); ShareDirectoryClient dc = shareClient.getDirectoryClient(generatePathName()); dc.create(); ShareFileClient destinationPath = dc.getFileClient(generatePathName()); ShareFileClient resultClient = primaryFileClient.rename(destinationPath.getFilePath()); assertTrue(destinationPath.exists()); assertEquals(destinationPath.getFilePath(), resultClient.getFilePath()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(booleans = {true, false}) public void renameReplaceIfExists(boolean replaceIfExists) { primaryFileClient.create(512); ShareFileClient destination = shareClient.getFileClient(generatePathName()); destination.create(512); boolean exception = false; try { primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath()) .setReplaceIfExists(replaceIfExists), null, null); } catch (ShareStorageException ignored) { exception = true; } assertEquals(replaceIfExists, !exception); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(booleans = {true, false}) public void renameIgnoreReadOnly(boolean ignoreReadOnly) { primaryFileClient.create(512); FileSmbProperties props = new FileSmbProperties() .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)); ShareFileClient destinationFile = 
shareClient.getFileClient(generatePathName()); destinationFile.createWithResponse(512L, null, props, null, null, null, null, null); boolean exception = false; try { primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destinationFile.getFilePath()) .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true), null, null); } catch (ShareStorageException ignored) { exception = true; } assertEquals(exception, !ignoreReadOnly); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameFilePermission() { primaryFileClient.create(512); String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setFilePermission(filePermission), null, null).getValue(); assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameFilePermissionAndKeySet() { primaryFileClient.create(512); String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()) .setFilePermission(filePermission) .setSmbProperties(new FileSmbProperties().setFilePermissionKey("filePermissionkey")), null, null) .getValue()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void renameFileSmbProperties() { primaryFileClient.create(512); String filePermission = 
"O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; String permissionKey = shareClient.createPermission(filePermission); OffsetDateTime fileCreationTime = testResourceNamer.now().minusDays(5); OffsetDateTime fileLastWriteTime = testResourceNamer.now().minusYears(2); OffsetDateTime fileChangeTime = testResourceNamer.now(); FileSmbProperties smbProperties = new FileSmbProperties() .setFilePermissionKey(permissionKey) .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY)) .setFileCreationTime(fileCreationTime) .setFileLastWriteTime(fileLastWriteTime) .setFileChangeTime(fileChangeTime); ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setSmbProperties(smbProperties), null, null).getValue(); ShareFileProperties destProperties = destClient.getProperties(); assertEquals(destProperties.getSmbProperties().getNtfsFileAttributes(), EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY)); assertNotNull(destProperties.getSmbProperties().getFileCreationTime()); assertNotNull(destProperties.getSmbProperties().getFileLastWriteTime()); FileShareTestHelper.compareDatesWithPrecision(destProperties.getSmbProperties().getFileChangeTime(), fileChangeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameMetadata() { primaryFileClient.create(512); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setMetadata(updatedMetadata), null, null); ShareFileClient renamedClient = resp.getValue(); ShareFileProperties getPropertiesAfter = renamedClient.getProperties(); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } 
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void renameTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient(); ShareFileClient primaryFileClient = rootDirectory.getFileClient(generatePathName() + "."); primaryFileClient.create(1024); Response<ShareFileClient> response = primaryFileClient .renameWithResponse(new ShareFileRenameOptions(generatePathName() + "."), null, null); FileShareTestHelper.assertResponseStatusCode(response, 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameError() { primaryFileClient = shareClient.getFileClient(generatePathName()); assertThrows(ShareStorageException.class, () -> primaryFileClient.rename(generatePathName())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameSourceAC() { primaryFileClient.create(512); String leaseID = setupFileLeaseCondition(primaryFileClient, RECEIVED_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(leaseID); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setSourceRequestConditions(src), null, null), 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameSourceACFail() { primaryFileClient.create(512); setupFileLeaseCondition(primaryFileClient, GARBAGE_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(GARBAGE_LEASE_ID); assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setSourceRequestConditions(src), null, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDestAC() { primaryFileClient.create(512); String pathName = 
generatePathName(); ShareFileClient destFile = shareClient.getFileClient(pathName); destFile.create(512); String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(leaseID); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse( new ShareFileRenameOptions(pathName).setDestinationRequestConditions(src).setReplaceIfExists(true), null, null), 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDestACFail() { primaryFileClient.create(512); String pathName = generatePathName(); ShareFileClient destFile = shareClient.getFileClient(pathName); destFile.create(512); setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(GARBAGE_LEASE_ID); assertThrows(RuntimeException.class, () -> destFile.renameWithResponse(new ShareFileRenameOptions(pathName) .setDestinationRequestConditions(src).setReplaceIfExists(true), null, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void renameContentType() { primaryFileClient.create(512); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setContentType("mytype"), null, null); ShareFileClient renamedClient = resp.getValue(); ShareFileProperties props = renamedClient.getProperties(); assertEquals(props.getContentType(), "mytype"); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); ShareFileClient fileClient = 
dirClient.getFileClient(generatePathName()); fileClient.create(512); String fileRename = generatePathName(); Response<ShareFileClient> resp = fileClient.renameWithResponse(new ShareFileRenameOptions(fileRename), null, null); ShareFileClient renamedClient = resp.getValue(); renamedClient.getProperties(); assertEquals(fileRename, renamedClient.getFilePath()); assertThrows(ShareStorageException.class, fileClient::getProperties); } @Test public void getSnapshotId() { String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString(); ShareFileClient shareSnapshotClient = fileBuilderHelper(shareName, filePath).snapshot(snapshot) .buildFileClient(); assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId()); } @Test public void getShareName() { assertEquals(shareName, primaryFileClient.getShareName()); } @Test public void getFilePath() { assertEquals(filePath, primaryFileClient.getFilePath()); } public void perCallPolicy() { primaryFileClient.create(512); ShareFileClient fileClient = fileBuilderHelper(primaryFileClient.getShareName(), primaryFileClient.getFilePath()).addPolicy(getPerCallVersionPolicy()).buildFileClient(); Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse(null, null); assertEquals(response.getHeaders().getValue(X_MS_VERSION), "2017-11-09"); } @Test public void defaultAudience() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(null) /* should default to "https: ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } @Test public void storageAccountAudience() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, 
fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(ShareAudience.createShareServiceAccountAudience(shareClient.getAccountName()))); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } @Test public void audienceError() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(ShareAudience.createShareServiceAccountAudience("badAudience"))); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); ShareStorageException e = assertThrows(ShareStorageException.class, aadFileClient::exists); assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode()); } @Test public void audienceFromString() { String url = String.format("https: ShareAudience audience = ShareAudience.fromString(url); String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(audience)); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } /* Uncomment this test when Client Name is enabled with STG 93. 
@PlaybackOnly @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-02-04") @Test public void listHandlesClientName() { ShareClient client = primaryFileServiceClient.getShareClient("testing"); ShareDirectoryClient directoryClient = client.getDirectoryClient("dir1"); ShareFileClient fileClient = directoryClient.getFileClient("test.txt"); List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList()); assertNotNull(list.get(0).getClientName()); } */ }
Add an assertion verifying that the special characters are preserved in the destination client after the rename, rather than only checking that the returned client is non-null.
/**
 * Verifies that renaming a file whose name contains a special Unicode character succeeds and that
 * the character is preserved in the destination client's path.
 *
 * @param specialChar the Unicode character embedded in both the source and destination file names
 */
public void renameWithUnicodeChars(String specialChar) {
    ShareFileClient fileClient = shareClient.getFileClient("test-file-source" + specialChar + " pdf.txt");
    fileClient.create(512);

    ShareFileClient destClient = fileClient.rename("test-file-destination" + specialChar + " pdf.txt");

    assertNotNull(destClient);
    // A non-null client alone does not prove the rename kept the name intact; the special
    // character must still be present in the destination path.
    assertTrue(destClient.getFilePath().contains(specialChar));
}
assertNotNull(destClient);
/**
 * Renames a file whose name embeds a special Unicode character and checks that the character
 * survives the rename: the destination client is non-null and its URL, once URL-encoded, still
 * contains the URL-encoded form of the character.
 */
public void renameWithUnicodeChars(String specialChar) {
    String sourceName = "test-file-source" + specialChar + " pdf.txt";
    String destinationName = "test-file-destination" + specialChar + " pdf.txt";

    ShareFileClient sourceClient = shareClient.getFileClient(sourceName);
    sourceClient.create(512);

    ShareFileClient renamedClient = sourceClient.rename(destinationName);

    assertNotNull(renamedClient);
    // Compare in URL-encoded space so non-ASCII characters match regardless of raw representation.
    assertTrue(Utility.urlEncode(renamedClient.getFileUrl()).contains(Utility.urlEncode(specialChar)));
}
class FileApiTests extends FileShareTestBase { private ShareFileClient primaryFileClient; private ShareClient shareClient; private String shareName; private String filePath; private static Map<String, String> testMetadata; private static ShareFileHttpHeaders httpHeaders; private FileSmbProperties smbProperties; private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL"; @BeforeEach public void setup() { shareName = generateShareName(); filePath = generatePathName(); shareClient = shareBuilderHelper(shareName).buildClient(); shareClient.create(); primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); testMetadata = Collections.singletonMap("testmetadata", "value"); httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en") .setContentType("application/octet-stream"); smbProperties = new FileSmbProperties() .setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL)); } @Test public void getFileURL() { String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount() .getConnectionString()).getAccountName(); String expectURL = String.format("https: String fileURL = primaryFileClient.getFileUrl(); assertEquals(expectURL, fileURL); } @Test public void getShareSnapshotURL() { String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount() .getConnectionString()).getAccountName(); String expectURL = String.format("https: ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot(); expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot(); ShareFileClient newFileClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot()) .buildClient().getFileClient(filePath); String fileURL = newFileClient.getFileUrl(); 
assertEquals(expectURL, fileURL); String snapshotEndpoint = String.format("https: shareName, filePath, shareSnapshotInfo.getSnapshot()); ShareFileClient client = getFileClient(StorageSharedKeyCredential.fromConnectionString( ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint); assertEquals(client.getFileUrl(), snapshotEndpoint); } @Test public void exists() { primaryFileClient.create(Constants.KB); assertTrue(primaryFileClient.exists()); } @Test public void doesNotExist() { assertFalse(primaryFileClient.exists()); } @Test public void existsError() { primaryFileClient = fileBuilderHelper(shareName, filePath) .sasToken("sig=dummyToken").buildFileClient(); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.exists()); assertEquals(e.getResponse().getStatusCode(), 403); } @Test public void createFile() { FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(1024, null, null, null, null, null, null), 201); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void createFile4TB() { FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(4 * Constants.TB, null, null, null, null, null, null), 201); } @Test public void createFileError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.create(-1)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @Test public void createFileWithArgsFpk() { String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFilePermissionKey(filePermissionKey); Response<ShareFileInfo> resp = primaryFileClient .createWithResponse(1024, httpHeaders, smbProperties, null, testMetadata, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 201); 
assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @Test public void createFileWithArgsFp() { smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileInfo> resp = primaryFileClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION, testMetadata, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 201); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void createChangeTime() { OffsetDateTime changeTime = testResourceNamer.now(); primaryFileClient.createWithResponse(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null, null, null, null, null); 
FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void createFileOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); Response<ShareFileInfo> result = fileClient.createWithResponse(Constants.KB, null, null, null, null, null, null); assertEquals(fileClient.getShareName(), shareName); String[] filePath = fileClient.getFilePath().split("/"); assertEquals(fileName, filePath[1]); assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG)); } @Test public void createFileWithArgsError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.createWithResponse(-1, null, null, null, testMetadata, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @ParameterizedTest @MethodSource("permissionAndKeySupplier") public void createFilePermissionAndKeyError(String filePermissionKey, String permission) { FileSmbProperties smbProperties = new FileSmbProperties().setFilePermissionKey(filePermissionKey); assertThrows(IllegalArgumentException.class, () -> primaryFileClient.createWithResponse(1024, null, smbProperties, permission, null, null, null)); } private static Stream<Arguments> permissionAndKeySupplier() { return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION), Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB)))); } @RequiredServiceVersion(clazz = 
ShareServiceVersion.class, min = "2022-11-02") @ParameterizedTest @ValueSource(booleans = {true, false}) public void createFileTrailingDot(boolean allowTrailingDot) { shareClient = getShareClient(shareName, allowTrailingDot, null); ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient(); String fileName = generatePathName(); String fileNameWithDot = fileName + "."; ShareFileClient fileClient = rootDirectory.getFileClient(fileNameWithDot); fileClient.create(1024); List<String> foundFiles = new ArrayList<>(); for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) { foundFiles.add(fileRef.getName()); } if (allowTrailingDot) { assertEquals(fileNameWithDot, foundFiles.get(0)); } else { assertEquals(fileName, foundFiles.get(0)); } } @Test public void uploadAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), 
stream.toByteArray()); } @Test public void uploadAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient .downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadAndDownloadDataOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || 
downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new 
ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadInputStreamNoLength() { primaryFileClient.create(DATA.getDefaultDataSize()); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.download(os); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test public void parallelUploadInputStreamBadLength() { int[] lengths = new int[]{0, -100, DATA.getDefaultDataSize() - 1, DATA.getDefaultDataSize() + 1}; for (int length : lengths) { primaryFileClient.create(DATA.getDefaultDataSize()); assertThrows(Exception.class, () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), length), null, null)); } } @Test public void uploadSuccessfulRetry() { primaryFileClient.create(DATA.getDefaultDataSize()); ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy()); clientWithFailure.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.download(os); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test 
public void uploadRangeAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void uploadRangeAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), 
DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void downloadAllNull() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse response = primaryFileClient.downloadWithResponse(stream, null, null, null); byte[] body = stream.toByteArray(); ShareFileDownloadHeaders headers = response.getDeserializedHeaders(); assertArrayEquals(DATA.getDefaultBytes(), body); CoreUtils.isNullOrEmpty(headers.getMetadata()); assertNotNull(headers.getContentLength()); assertNotNull(headers.getContentType()); assertNull(headers.getContentMd5()); assertNull(headers.getContentEncoding()); assertNull(headers.getCacheControl()); assertNull(headers.getContentDisposition()); assertNull(headers.getContentLanguage()); } @ParameterizedTest @ValueSource(ints = {0, 1}) public void downloadEmptyFile(int fileSize) { primaryFileClient.create(fileSize); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); primaryFileClient.download(outStream); byte[] result = outStream.toByteArray(); assertEquals(result.length, fileSize); if (fileSize > 0) { assertEquals(0, result[0]); } } /* This is to test the appropriate integration of DownloadResponse, including setting the correct range values on HttpGetterInfo. */ @Test public void downloadWithRetryRange() { /* We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing a retry per the DownloadRetryOptions. The next request should have the same range header, which was generated from the count and offset values in HttpGetterInfo that was constructed on the initial call to download. We don't need to check the data here, but we want to ensure that the correct range is set each time. 
This will test the correction of a bug that was found which caused HttpGetterInfo to have an incorrect offset when it was constructed in FileClient.download(). */ primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileClient fc2 = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new MockRetryRangeResponsePolicy("bytes=2-6")); ShareFileRange range = new ShareFileRange(2, 6L); DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(3); RuntimeException e = assertThrows(RuntimeException.class, () -> fc2.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileDownloadOptions() .setRange(range).setRetryOptions(options), null, null)); /* Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is NOT thrown because the types would not match. */ assertInstanceOf(IOException.class, e.getCause()); } @Test public void downloadRetryDefault() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileClient failureClient = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new MockFailureResponsePolicy(5)); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); failureClient.download(outStream); String bodyStr = outStream.toString(); assertEquals(bodyStr, DATA.getDefaultText()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void downloadTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(DATA.getDefaultDataSizeLong()); shareFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); 
shareFileClient.download(outStream); String downloadedData = outStream.toString(); assertEquals(downloadedData, DATA.getDefaultText()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void downloadOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); fileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileProperties properties = fileClient.getProperties(); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse response = fileClient.downloadWithResponse(stream, null, null, null); byte[] body = stream.toByteArray(); ShareFileDownloadHeaders headers = response.getDeserializedHeaders(); assertArrayEquals(body, DATA.getDefaultBytes()); CoreUtils.isNullOrEmpty(headers.getMetadata()); assertEquals(headers.getContentLength(), properties.getContentLength()); assertEquals(headers.getContentType(), properties.getContentType()); assertEquals(headers.getContentMd5(), properties.getContentMd5()); assertEquals(headers.getContentEncoding(), properties.getContentEncoding()); assertEquals(headers.getCacheControl(), properties.getCacheControl()); assertEquals(headers.getContentDisposition(), properties.getContentDisposition()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void uploadRange4TB() { long fileSize = 4 * Constants.TB; primaryFileClient.create(fileSize); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()) 
.setOffset(fileSize - DATA.getDefaultDataSizeLong()), null, null); /* Upload to end of file. */ ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(fileSize - DATA.getDefaultDataSizeLong(), fileSize), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); } @ParameterizedTest @ValueSource(longs = { 4 * Constants.MB, 5 * Constants.MB}) public void uploadBufferedRangeGreaterThanMaxPutRange(long length) { primaryFileClient.create(length); ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length)); assertDoesNotThrow(() -> primaryFileClient.upload(data, length, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void uploadRangeTrailingDot() { primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(DATA.getDefaultDataSizeLong()); ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(options, null, null); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(), null, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 200); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadRangeOAuth() { ShareServiceClient oAuthServiceClient = 
getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @ParameterizedTest @MethodSource("bufferedUploadVariousPartitions") public void bufferedUploadVariousPartitions(Long length, Long uploadChunkLength) { primaryFileClient.create(length); ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper .getRandomBuffer(Math.toIntExact(length))); assertNotNull(primaryFileClient.upload(data, length, new ParallelTransferOptions() .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength))); } private 
static Stream<Arguments> bufferedUploadVariousPartitions() { return Stream.of( Arguments.of(1024L, null), Arguments.of(1024L, 1024L), Arguments.of(1024L, 256L), Arguments.of(4L * Constants.MB, null), Arguments.of(4L * Constants.MB, 1024L), Arguments.of(20L * Constants.MB, null), Arguments.of(20L * Constants.MB, 4L * Constants.MB) ); } @Test public void bufferedUploadErrorPartitionTooBig() { long length = 20 * Constants.MB; long uploadChunkLength = 20 * Constants.MB; primaryFileClient.create(length); ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length)); assertThrows(Exception.class, () -> primaryFileClient.upload(data, length, new ParallelTransferOptions() .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength))); } @Test public void uploadDataError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void parallelUploadDataError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void uploadRangeDataError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong())); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void uploadDataRetryOnTransientFailure() { ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new 
TransientFailureInjectingHttpPipelinePolicy()); primaryFileClient.create(1024); clientWithFailure.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.downloadWithResponse(os, new ShareFileRange(0, DATA.getDefaultDataSizeLong() - 1), null, null, null); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test public void uploadAndClearRange() { String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); primaryFileClient.create(fullInfoString.length()); primaryFileClient.uploadRange(fullInfoData, fullInfoString.length()); primaryFileClient.clearRange(7); ByteArrayOutputStream stream = new ByteArrayOutputStream(); primaryFileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null); for (byte b : stream.toByteArray()) { assertEquals(0, b); } } @Test public void uploadAndClearRangeWithArgs() { String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); primaryFileClient.create(fullInfoString.length()); primaryFileClient.uploadRange(fullInfoData, fullInfoString.length()); primaryFileClient.clearRangeWithResponse(7, 1, null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, 7L), false, null, null); for (byte b : stream.toByteArray()) { assertEquals(0, b); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void clearRangeTrailingDot() { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(DATA.getDefaultDataSizeLong()); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.clearRangeWithResponse( DATA.getDefaultDataSizeLong(), 0, 
null, null), 201); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadAndClearRangeOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); fileClient.create(fullInfoString.length()); fileClient.uploadRange(fullInfoData, fullInfoString.length()); fileClient.clearRange(7); ByteArrayOutputStream stream = new ByteArrayOutputStream(); fileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null); for (byte b : stream.toByteArray()) { assertEquals(0, b); } } @Test public void clearRangeError() { String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); primaryFileClient.create(fullInfoString.length()); primaryFileClient.uploadRange(fullInfoData, fullInfoString.length()); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.clearRange(30)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE); } @Test public void clearRangeErrorArgs() { String fullInfoString = "please clear the range"; InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8)); primaryFileClient.create(fullInfoString.length()); primaryFileClient.uploadRange(fullInfoData, fullInfoString.length()); ShareStorageException e = assertThrows(ShareStorageException.class, () -> 
primaryFileClient.clearRangeWithResponse(7, 20, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE); } @ParameterizedTest @MethodSource("uploadDataLengthMismatchSupplier") public void uploadDataLengthMismatch(int size, String errMsg) { primaryFileClient.create(1024); UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions( DATA.getDefaultInputStream(), size), null, Context.NONE)); assertTrue(e.getMessage().contains(errMsg)); } private static Stream<Arguments> uploadDataLengthMismatchSupplier() { return Stream.of( Arguments.of(6, "more than"), Arguments.of(8, "less than")); } @ParameterizedTest @MethodSource("uploadDataLengthMismatchSupplier") public void parallelUploadDataLengthMismatch(int size, String errMsg) { primaryFileClient.create(1024); UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () -> primaryFileClient.upload(DATA.getDefaultInputStream(), size, null)); assertTrue(e.getMessage().contains(errMsg)); } @ParameterizedTest @MethodSource("uploadDataLengthMismatchSupplier") public void uploadRangeLengthMismatch(int size, String errMsg) { primaryFileClient.create(1024); UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class, () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), size)); assertTrue(e.getMessage().contains(errMsg)); } @Test public void downloadDataError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileRange(0, 1023L), false, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void uploadFileDoesNotExist() { File uploadFile = new File(testFolder.getPath() + "/fakefile.txt"); if (uploadFile.exists()) { assert uploadFile.delete(); } UncheckedIOException e 
= assertThrows(UncheckedIOException.class, () -> primaryFileClient.uploadFromFile(uploadFile.getPath())); assertInstanceOf(NoSuchFileException.class, e.getCause()); uploadFile.delete(); } /* * Tests downloading a file using a default client that doesn't have a HttpClient passed to it. */ @LiveOnly @ParameterizedTest @ValueSource(ints = { 0, 20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB }) public void downloadFileBufferCopy(int fileSize) throws IOException { ShareServiceClient shareServiceClient = new ShareServiceClientBuilder() .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()) .buildClient(); ShareFileClient fileClient = shareServiceClient.getShareClient(shareName) .createFile(filePath, fileSize); File file = FileShareTestHelper.getRandomFile(fileSize); fileClient.uploadFromFile(file.toPath().toString()); File outFile = new File(generatePathName() + ".txt"); if (outFile.exists()) { assertTrue(outFile.delete()); } fileClient.downloadToFile(outFile.toPath().toString()); assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize)); shareServiceClient.deleteShare(shareName); outFile.delete(); file.delete(); } @Test public void uploadAndDownloadFileExists() throws IOException { String data = "Download file exists"; File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix)); if (!downloadFile.exists()) { assertTrue(downloadFile.createNewFile()); } primaryFileClient.create(data.length()); primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)), data.length()); UncheckedIOException e = assertThrows(UncheckedIOException.class, () -> primaryFileClient.downloadToFile(downloadFile.getPath())); assertInstanceOf(FileAlreadyExistsException.class, e.getCause()); downloadFile.delete(); } @Test public void uploadAndDownloadToFileDoesNotExist() throws IOException { String data = "Download file DoesNotExist"; File downloadFile = new 
File(String.format("%s/%s.txt", testFolder.getPath(), prefix)); if (downloadFile.exists()) { assertTrue(downloadFile.createNewFile()); } primaryFileClient.create(data.length()); primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)), data.length()); primaryFileClient.downloadToFile(downloadFile.getPath()); Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z"); assertEquals(data, scanner.next()); scanner.close(); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), downloadFile.getName()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void uploadRangePreserveFileLastWrittenOn() { FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE}; for (FileLastWrittenMode mode : modes) { primaryFileClient.create(Constants.KB); ShareFileProperties initialProps = primaryFileClient.getProperties(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions( new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB).setLastWrittenMode(mode), null, null); ShareFileProperties resultProps = primaryFileClient.getProperties(); if (mode.equals(FileLastWrittenMode.PRESERVE)) { assertEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties() .getFileLastWriteTime()); } else { assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties() .getFileLastWriteTime()); } } } @Disabled("the groovy test was not testing this test properly. need to investigate this test further.") @ParameterizedTest @ValueSource(strings = { "", "ü1ü" /* Something that needs to be url encoded. 
*/ }) public void uploadRangeFromURL(String pathSuffix) { primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient(); primaryFileClient.create(1024); String data = "The quick brown fox jumps over the lazy dog"; int sourceOffset = 5; int length = 5; int destinationOffset = 0; primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length()); StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString( ENVIRONMENT.getPrimaryAccount().getConnectionString()); String sasToken = new ShareServiceSasSignatureValues() .setExpiryTime(testResourceNamer.now().plusDays(1)) .setPermissions(new ShareFileSasPermission().setReadPermission(true)) .setShareName(primaryFileClient.getShareName()) .setFilePath(primaryFileClient.getFilePath()) .generateSasQueryParameters(credential) .encode(); ShareFileClient client = fileBuilderHelper(shareName, "destination" + pathSuffix) .endpoint(primaryFileClient.getFileUrl().toString()) .buildFileClient(); client.create(1024); client.uploadRangeFromUrl(length, destinationOffset, sourceOffset, primaryFileClient.getFileUrl() + "?" 
+ sasToken); ByteArrayOutputStream stream = new ByteArrayOutputStream(); client.download(stream); String result = new String(stream.toByteArray()); for (int i = 0; i < length; i++) { assertEquals(result.charAt(destinationOffset + i), data.charAt(sourceOffset + i)); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadRangeFromURLOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClientSharedKey(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(1024); String data = "The quick brown fox jumps over the lazy dog"; int sourceOffset = 5; int length = 5; int destinationOffset = 0; fileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length()); StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString( ENVIRONMENT.getPrimaryAccount().getConnectionString()); String sasToken = new ShareServiceSasSignatureValues() .setExpiryTime(testResourceNamer.now().plusDays(1)) .setPermissions(new ShareFileSasPermission().setReadPermission(true)) .setShareName(fileClient.getShareName()) .setFilePath(fileClient.getFilePath()) .generateSasQueryParameters(credential) .encode(); String fileNameDest = generatePathName(); ShareFileClient fileClientDest = dirClient.getFileClient(fileNameDest); fileClientDest.create(1024); Response<ShareFileUploadRangeFromUrlInfo> uploadResponse = fileClientDest.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, fileClient.getFileUrl() + "?" 
+ sasToken, null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClientDest.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), 1024); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertEquals(stream.toByteArray()[0], 117); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void uploadRangeFromUrlPreserveFileLastWrittenOn() { FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE}; primaryFileClient.create(Constants.KB); ShareFileClient destinationClient = shareClient.getFileClient(generatePathName()); destinationClient.create(Constants.KB); ShareFileProperties initialProps = destinationClient.getProperties(); primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB); StorageSharedKeyCredential credential = StorageSharedKeyCredential .fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString()); String sasToken = new ShareServiceSasSignatureValues() .setExpiryTime(testResourceNamer.now().plusDays(1)) .setPermissions(new ShareFileSasPermission().setReadPermission(true)) .setShareName(primaryFileClient.getShareName()) .setFilePath(primaryFileClient.getFilePath()) .generateSasQueryParameters(credential) .encode(); for (FileLastWrittenMode mode 
: modes) { destinationClient.uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(Constants.KB, primaryFileClient.getFileUrl() + "?" + sasToken).setLastWrittenMode(mode), null, null); ShareFileProperties resultProps = destinationClient.getProperties(); if (mode.equals(FileLastWrittenMode.PRESERVE)) { assertTrue(FileShareTestHelper.compareDatesWithPrecision( initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties().getFileLastWriteTime())); } else { assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties() .getFileLastWriteTime()); } } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void uploadRangeFromUrlTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient(); ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + "."); sourceClient.create(Constants.KB); ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + "."); destinationClient.create(Constants.KB); sourceClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB); ShareFileSasPermission permissions = new ShareFileSasPermission() .setReadPermission(true) .setWritePermission(true) .setCreatePermission(true) .setDeletePermission(true); OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1); ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions); String sasToken = shareClient.generateSas(sasValues); Response<ShareFileUploadRangeFromUrlInfo> res = destinationClient.uploadRangeFromUrlWithResponse( new ShareFileUploadRangeFromUrlOptions(Constants.KB, sourceClient.getFileUrl() + "?" 
+ sasToken), null, null); FileShareTestHelper.assertResponseStatusCode(res, 201); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void uploadRangeFromUrlTrailingDotFail() { shareClient = getShareClient(shareName, true, false); ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient(); ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + "."); sourceClient.create(DATA.getDefaultDataSizeLong()); ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + "."); destinationClient.create(DATA.getDefaultDataSizeLong()); sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); assertThrows(ShareStorageException.class, () -> destinationClient .uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(DATA.getDefaultDataSizeLong(), sourceClient.getFileUrl()), null, null)); } @Test public void openInputStreamWithRange() throws IOException { primaryFileClient.create(1024); ShareFileRange shareFileRange = new ShareFileRange(5L, 10L); byte[] dataBytes = "long test string".getBytes(StandardCharsets.UTF_8); ByteArrayInputStream inputStreamData = new ByteArrayInputStream(dataBytes); primaryFileClient.upload(inputStreamData, dataBytes.length, null); int totalBytesRead = 0; StorageFileInputStream stream = primaryFileClient.openInputStream(shareFileRange); while (stream.read() != -1) { totalBytesRead++; } stream.close(); assertEquals(6, totalBytesRead); } @ParameterizedTest @ValueSource(strings = { "", "ü1ü" /* Something that needs to be url encoded. 
*/ }) public void startCopy(String pathSuffix) { primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient(); primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); } @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly, boolean setArchiveAttribute, PermissionCopyModeType permissionType) { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); if (setFilePermissionKey) { smbProperties.setFilePermissionKey(filePermissionKey); } SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, smbProperties, setFilePermission ? 
FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute, null, null, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void startCopyTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + "."); sourceClient.create(1024); ShareFileClient destClient = shareClient.getFileClient(generatePathName() + "."); destClient.create(1024); byte[] data = FileShareTestHelper.getRandomBuffer(Constants.KB); ByteArrayInputStream inputStream = new ByteArrayInputStream(data); sourceClient.uploadRange(inputStream, Constants.KB); SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null); poller.waitForCompletion(); assertEquals(poller.poll().getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void startCopyTrailingDotFail() { shareClient = getShareClient(shareName, true, false); ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + "."); sourceClient.create(1024); ShareFileClient destClient = shareClient.getFileClient(generatePathName() + "."); destClient.create(1024); sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); ShareStorageException e = assertThrows(ShareStorageException.class, () -> destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap between" + " the time subscribed and the time we start observing events.") @Test public void startCopyError() { 
primaryFileClient.create(1024); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy("some url", testMetadata, null); ShareStorageException e = assertThrows(ShareStorageException.class, poller::waitForCompletion); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.INVALID_HEADER_VALUE); } @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly, boolean setArchiveAttribute, PermissionCopyModeType permissionType) { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); if (setFilePermissionKey) { smbProperties.setFilePermissionKey(filePermissionKey); } ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(setFilePermission ? 
FILE_PERMISSION : null) .setIgnoreReadOnly(ignoreReadOnly) .setArchiveAttribute(setArchiveAttribute) .setPermissionCopyModeType(permissionType); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @Test public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); ShareFileCopyOptions options = new ShareFileCopyOptions() .setIgnoreReadOnly(true) .setArchiveAttribute(true); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsFilePermission() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE); smbProperties .setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setNtfsFileAttributes(ntfs); ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(FILE_PERMISSION) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties(); assertNotNull(pollResponse.getValue().getCopyId()); 
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(), smbProperties.getFileCreationTime()); FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(), smbProperties.getFileLastWriteTime()); assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsChangeTime() { ShareFileInfo client = primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); smbProperties.setFileChangeTime(testResourceNamer.now()); ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(FILE_PERMISSION) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(), primaryFileClient.getProperties().getSmbProperties().getFileChangeTime()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE); smbProperties .setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setNtfsFileAttributes(ntfs) .setFilePermissionKey(filePermissionKey); ShareFileCopyOptions options = new 
ShareFileCopyOptions() .setSmbProperties(smbProperties) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(), smbProperties.getFileCreationTime()); FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(), smbProperties.getFileLastWriteTime()); assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes()); } @Test public void startCopyWithOptionLease() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId); ShareFileCopyOptions options = new ShareFileCopyOptions() .setDestinationRequestConditions(conditions); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @Test public void startCopyWithOptionsInvalidLease() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); String leaseId = testResourceNamer.randomUuid(); ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId); ShareFileCopyOptions options = new ShareFileCopyOptions() .setDestinationRequestConditions(conditions); assertThrows(ShareStorageException.class, () -> primaryFileClient.beginCopy(sourceURL, 
options, null)); } @Test public void startCopyWithOptionsMetadata() { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); ShareFileCopyOptions options = new ShareFileCopyOptions() .setMetadata(testMetadata); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void startCopyWithOptionsWithOriginalSmbProperties() { primaryFileClient.create(1024); ShareFileProperties initialProperties = primaryFileClient.getProperties(); OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime(); OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime(); OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime(); EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes(); String sourceURL = primaryFileClient.getFileUrl(); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId); CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList() .setCreatedOn(true) .setLastWrittenOn(true) .setChangedOn(true) .setFileAttributes(true); ShareFileCopyOptions options = new ShareFileCopyOptions() .setDestinationRequestConditions(conditions) .setSmbPropertiesToCopy(list); SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); ShareFileProperties resultProperties = primaryFileClient.getProperties(); assertNotNull(pollResponse.getValue().getCopyId()); 
assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED); FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getSmbProperties() .getFileCreationTime()); FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getSmbProperties() .getFileLastWriteTime()); FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getSmbProperties() .getFileChangeTime()); assertEquals(fileAttributes, resultProperties.getSmbProperties().getNtfsFileAttributes()); } @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn, boolean fileAttributes) { primaryFileClient.create(1024); String sourceURL = primaryFileClient.getFileUrl(); EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE); CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList() .setCreatedOn(createdOn) .setLastWrittenOn(lastWrittenOn) .setChangedOn(changedOn) .setFileAttributes(fileAttributes); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFileChangeTime(testResourceNamer.now()) .setNtfsFileAttributes(ntfs); ShareFileCopyOptions options = new ShareFileCopyOptions() .setSmbProperties(smbProperties) .setFilePermission(FILE_PERMISSION) .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE) .setSmbPropertiesToCopy(list); assertThrows(IllegalArgumentException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void startCopyOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) 
.getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient sourceClient = dirClient.getFileClient(generatePathName()); sourceClient.create(DATA.getDefaultDataSizeLong()); ShareFileClient destClient = dirClient.getFileClient(generatePathName()); destClient.create(DATA.getDefaultDataSizeLong()); sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); String sourceURL = sourceClient.getFileUrl(); SyncPoller<ShareFileCopyInfo, Void> poller = sourceClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse.getValue().getCopyId()); } @Test public void abortCopy() { int fileSize = Constants.MB; byte[] bytes = new byte[fileSize]; ByteArrayInputStream data = new ByteArrayInputStream(bytes); ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); primaryFileClient.create(fileSize); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient(); dest.create(fileSize); SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId())); } @Test public void abortCopyLease() { int fileSize = Constants.MB; byte[] bytes = new byte[fileSize]; ByteArrayInputStream data = new ByteArrayInputStream(bytes); ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); primaryFileClient.create(fileSize); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = 
fileBuilderHelper(shareName, filePath).buildFileClient(); dest.create(fileSize); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId); SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy( sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(), requestConditions, null, null)); } @Test public void abortCopyInvalidLease() { int fileSize = Constants.MB; byte[] bytes = new byte[fileSize]; ByteArrayInputStream data = new ByteArrayInputStream(bytes); ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); primaryFileClient.create(fileSize); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient(); dest.create(fileSize); String leaseId = testResourceNamer.randomUuid(); ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId); assertThrows(ShareStorageException.class, () -> { SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy( sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(), requestConditions, null, null); }); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void abortCopyTrailingDot() { ByteArrayInputStream data = new ByteArrayInputStream(new 
byte[Constants.MB]); String fileName = generatePathName() + "."; ShareFileClient primaryFileClient = getFileClient(shareName, fileName, true, null); primaryFileClient.create(Constants.MB); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, fileName).buildFileClient(); dest.create(Constants.MB); SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void abortCopyOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient sourceClient = dirClient.getFileClient(fileName); sourceClient.create(DATA.getDefaultDataSizeLong()); sourceClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); String sourceURL = sourceClient.getFileUrl(); ShareFileClient destClient = dirClient.getFileClient(generatePathName()); destClient.create(DATA.getDefaultDataSizeLong()); SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> destClient.abortCopy(pollResponse.getValue().getCopyId())); } @Test public void abortCopyError() { 
assertThrows(ShareStorageException.class, () -> primaryFileClient.abortCopy("randomId")); } @Test public void deleteFile() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteWithResponse(null, null), 202); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void deleteFileTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); FileShareTestHelper.assertResponseStatusCode(shareFileClient.deleteWithResponse(null, null), 202); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void deleteFileOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(Constants.KB); FileShareTestHelper.assertResponseStatusCode(fileClient.deleteWithResponse(null, null), 202); } @Test public void deleteFileError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.deleteWithResponse(null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void deleteIfExistsFile() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteIfExistsWithResponse(null, null, null), 202); } @Test public void deleteIfExistsFileMin() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); primaryFileClient.deleteIfExists(); } @Test public void 
deleteIfExistsFileThatDoesNotExist() { ShareFileClient client = shareClient.getFileClient(generateShareName()); Response<Boolean> response = client.deleteIfExistsWithResponse(null, null, null); assertFalse(response.getValue()); FileShareTestHelper.assertResponseStatusCode(response, 404); assertFalse(client.exists()); } @Test public void deleteIfExistsFileThatWasAlreadyDeleted() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); assertTrue(primaryFileClient.deleteIfExists()); assertFalse(primaryFileClient.deleteIfExists()); } @Test public void getProperties() { primaryFileClient.create(1024); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileProperties> resp = primaryFileClient.getPropertiesWithResponse(null, null); FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void getPropertiesTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileProperties> resp = shareFileClient.getPropertiesWithResponse(null, null); 
FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void getPropertiesOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); ShareFileInfo createInfo = fileClient.create(Constants.KB); ShareFileProperties properties = fileClient.getProperties(); assertEquals(createInfo.getETag(), properties.getETag()); assertEquals(createInfo.getLastModified(), properties.getLastModified()); assertEquals(createInfo.getSmbProperties().getFilePermissionKey(), properties.getSmbProperties().getFilePermissionKey()); assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(), properties.getSmbProperties().getNtfsFileAttributes()); assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(), properties.getSmbProperties().getFileLastWriteTime()); assertEquals(createInfo.getSmbProperties().getFileCreationTime(), properties.getSmbProperties().getFileCreationTime()); assertEquals(createInfo.getSmbProperties().getFileChangeTime(), 
properties.getSmbProperties().getFileChangeTime()); assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId()); assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId()); } @Test public void getPropertiesError() { ShareStorageException ex = assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties()); assertTrue(ex.getMessage().contains("ResourceNotFound")); } @Test public void setHttpHeadersFpk() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFilePermissionKey(filePermissionKey); Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @Test public void setHttpHeadersFp() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, FILE_PERMISSION, null, null); 
FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void setHttpHeadersChangeTime() { primaryFileClient.create(512); OffsetDateTime changeTime = testResourceNamer.now(); primaryFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null); FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void setHttpHeadersTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); OffsetDateTime changeTime = testResourceNamer.now(); shareFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null); FileShareTestHelper.compareDatesWithPrecision(shareFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void setHttpHeadersOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); 
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); httpHeaders = new ShareFileHttpHeaders() .setContentType("application/octet-stream") .setContentDisposition("attachment") .setCacheControl("no-transform") .setContentEncoding("gzip") .setContentLanguage("en"); Response<ShareFileInfo> res = fileClient.setPropertiesWithResponse(Constants.KB, httpHeaders, null, null, null, null); ShareFileProperties properties = fileClient.getProperties(); FileShareTestHelper.assertResponseStatusCode(res, 200); assertNotNull(res.getValue().getETag()); assertEquals(res.getValue().getETag(), res.getHeaders().getValue(HttpHeaderName.ETAG)); assertEquals(properties.getContentType(), "application/octet-stream"); assertEquals(properties.getContentDisposition(), "attachment"); assertEquals(properties.getCacheControl(), "no-transform"); assertEquals(properties.getContentEncoding(), "gzip"); assertNull(properties.getContentMd5()); } @Test public void setHttpHeadersError() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.setPropertiesWithResponse(-1, null, null, null, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @Test public void setMetadata() { primaryFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileProperties getPropertiesBefore = primaryFileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = primaryFileClient .setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = primaryFileClient.getProperties(); assertEquals(testMetadata, 
getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void setMetadataTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileProperties getPropertiesBefore = shareFileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = shareFileClient.setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = shareFileClient.getProperties(); assertEquals(testMetadata, getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void setMetadataOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.createWithResponse(Constants.KB, null, null, null, testMetadata, null, null); ShareFileProperties getPropertiesBefore = fileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = fileClient.setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = fileClient.getProperties(); assertEquals(testMetadata, 
getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } @Test public void setMetadataError() { primaryFileClient.create(1024); Map<String, String> errorMetadata = Collections.singletonMap("", "value"); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.setMetadataWithResponse(errorMetadata, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY); } @Test public void listRanges() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listRangesTrailingDot() throws IOException { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(1024); String fileName = generatePathName() + "."; String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesWithRange() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges(new ShareFileRange(0, 
511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesSnapshot() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient = fileBuilderHelper(shareName, filePath) .snapshot(snapInfo.getSnapshot()) .buildFileClient(); primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesSnapshotFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient = fileBuilderHelper(shareName, filePath) .snapshot("2020-08-07T16:58:02.0000000Z") .buildFileClient(); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); })); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listRangesOAuth() throws IOException { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient 
fileClient = dirClient.getFileClient(fileName); fileClient.create(Constants.KB); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); fileClient.uploadFromFile(uploadFile); fileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear, List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) { primaryFileClient.create(4 * Constants.MB); primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(4 * Constants.MB)), 4 * Constants.MB); String snapshotId = primaryFileServiceClient.getShareClient(primaryFileClient.getShareName()) .createSnapshot() .getSnapshot(); rangesToUpdate.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(new ByteArrayInputStream( FileShareTestHelper.getRandomBuffer((int) size)), size).setOffset(it.getStart()), null, null); }); rangesToClear.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; primaryFileClient.clearRangeWithResponse(size, it.getStart(), null, null); }); ShareFileRangeList rangeDiff = primaryFileClient.listRangesDiff(snapshotId); assertEquals(expectedRanges.size(), rangeDiff.getRanges().size()); assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size()); for (int i = 0; i < expectedRanges.size(); i++) { FileRange actualRange = rangeDiff.getRanges().get(i); FileRange expectedRange = expectedRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } for (int i = 0; i < 
expectedClearRanges.size(); i++) { ClearRange actualRange = rangeDiff.getClearRanges().get(i); ClearRange expectedRange = expectedClearRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listRangesDiffOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); fileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB); String snapshotId = primaryFileServiceClient.getShareClient(fileClient.getShareName()) .createSnapshot() .getSnapshot(); List<FileRange> rangesToUpdate = FileShareTestHelper.createFileRanges(); List<FileRange> rangesToClear = FileShareTestHelper.createFileRanges(); List<FileRange> expectedRanges = FileShareTestHelper.createFileRanges(); List<FileRange> expectedClearRanges = FileShareTestHelper.createFileRanges(); rangesToUpdate.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; fileClient.uploadWithResponse(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) size)), size, it.getStart(), null, null); }); rangesToClear.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; fileClient.clearRangeWithResponse(size, it.getStart(), null, null); }); ShareFileRangeList rangeDiff = fileClient.listRangesDiff(snapshotId); assertEquals(expectedRanges.size(), rangeDiff.getRanges().size()); assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size()); for (int i = 0; i < expectedRanges.size(); i++) { FileRange actualRange = rangeDiff.getRanges().get(i); 
FileRange expectedRange = expectedRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } for (int i = 0; i < expectedClearRanges.size(); i++) { ClearRange actualRange = rangeDiff.getClearRanges().get(i); FileRange expectedRange = expectedClearRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void listRangesDiffWithRange() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025, 1026L)), null, null).getValue().getRanges().get(0); assertEquals(1025, range.getStart()); assertEquals(1026, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void listRangesDiffLease() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), 
DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()) .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)), null, null) .getValue().getRanges().get(0); assertEquals(1024, range.getStart()); assertEquals(1030, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listRangesDiffTrailingDot() throws IOException { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); String fileNameWithDot = generateShareName() + "."; primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileNameWithDot); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1024L); primaryFileClient.uploadRangeWithResponse(options, null, null); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025L, 1026L)), null, null).getValue().getRanges().get(0); assertEquals(1025, range.getStart()); assertEquals(1026, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileNameWithDot); } @Test public void listRangesDiffLeaseFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo 
snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadWithResponse(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), 1024L, null, null); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()) .setRequestConditions(new ShareRequestConditions() .setLeaseId(testResourceNamer.randomUuid())), null, null).getValue().getRanges().get(0)); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesDiffFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions("2020-08-07T16:58:02.0000000Z"), null, null).getValue().getRanges() .get(0)); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listHandles() { primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles().stream().count()); } @Test public void listHandlesWithMaxResult() { primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles(2, null, null).stream().count()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listHandlesTrailingDot() { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles().stream().count()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listHandlesOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = 
oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); assertEquals(0, fileClient.listHandles().stream().count()); } @PlaybackOnly @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03") @Test public void listHandlesAccessRights() { ShareClient shareClient = primaryFileServiceClient.getShareClient("myshare"); ShareDirectoryClient directoryClient = shareClient.getDirectoryClient("mydirectory"); ShareFileClient fileClient = directoryClient.getFileClient("myfile"); List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList()); assertEquals(list.get(0).getAccessRights().get(0), ShareFileHandleAccessRights.WRITE); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03") @Test public void forceCloseHandleMin() { primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @Test public void forceCloseHandleInvalidHandleID() { primaryFileClient.create(512); assertThrows(ShareStorageException.class, () -> primaryFileClient.forceCloseHandle("invalidHandleId")); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void forceCloseHandleTrailingDot() { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void forceCloseHandleOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new 
ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(512); CloseHandlesInfo handlesClosedInfo = fileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07") @Test public void forceCloseAllHandlesMin() { primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseAllHandles(null, null); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameMin() { primaryFileClient.create(512); assertNotNull(primaryFileClient.rename(generatePathName())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(strings = {"\u200B", "\u200C", "\u200D", "\uFEFF"}) @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameWithResponse() { primaryFileClient.create(512); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()), null, null); ShareFileClient renamedClient = resp.getValue(); assertNotNull(renamedClient.getProperties()); assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12") @Test public void renameSasToken() { ShareFileSasPermission permissions = new ShareFileSasPermission() .setReadPermission(true) .setWritePermission(true) .setCreatePermission(true) .setDeletePermission(true); OffsetDateTime expiryTime = 
testResourceNamer.now().plusDays(1); ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions); String sas = shareClient.generateSas(sasValues); ShareFileClient client = getFileClient(sas, primaryFileClient.getFileUrl()); primaryFileClient.create(1024); String fileName = generatePathName(); ShareFileClient destClient = client.rename(fileName); assertNotNull(destClient.getProperties()); assertEquals(fileName, destClient.getFilePath()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDifferentDirectory() { primaryFileClient.create(512); ShareDirectoryClient dc = shareClient.getDirectoryClient(generatePathName()); dc.create(); ShareFileClient destinationPath = dc.getFileClient(generatePathName()); ShareFileClient resultClient = primaryFileClient.rename(destinationPath.getFilePath()); assertTrue(destinationPath.exists()); assertEquals(destinationPath.getFilePath(), resultClient.getFilePath()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(booleans = {true, false}) public void renameReplaceIfExists(boolean replaceIfExists) { primaryFileClient.create(512); ShareFileClient destination = shareClient.getFileClient(generatePathName()); destination.create(512); boolean exception = false; try { primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath()) .setReplaceIfExists(replaceIfExists), null, null); } catch (ShareStorageException ignored) { exception = true; } assertEquals(replaceIfExists, !exception); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(booleans = {true, false}) public void renameIgnoreReadOnly(boolean ignoreReadOnly) { primaryFileClient.create(512); FileSmbProperties props = new FileSmbProperties() .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)); ShareFileClient destinationFile = 
shareClient.getFileClient(generatePathName()); destinationFile.createWithResponse(512L, null, props, null, null, null, null, null); boolean exception = false; try { primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destinationFile.getFilePath()) .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true), null, null); } catch (ShareStorageException ignored) { exception = true; } assertEquals(exception, !ignoreReadOnly); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameFilePermission() { primaryFileClient.create(512); String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setFilePermission(filePermission), null, null).getValue(); assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameFilePermissionAndKeySet() { primaryFileClient.create(512); String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()) .setFilePermission(filePermission) .setSmbProperties(new FileSmbProperties().setFilePermissionKey("filePermissionkey")), null, null) .getValue()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void renameFileSmbProperties() { primaryFileClient.create(512); String filePermission = 
"O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; String permissionKey = shareClient.createPermission(filePermission); OffsetDateTime fileCreationTime = testResourceNamer.now().minusDays(5); OffsetDateTime fileLastWriteTime = testResourceNamer.now().minusYears(2); OffsetDateTime fileChangeTime = testResourceNamer.now(); FileSmbProperties smbProperties = new FileSmbProperties() .setFilePermissionKey(permissionKey) .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY)) .setFileCreationTime(fileCreationTime) .setFileLastWriteTime(fileLastWriteTime) .setFileChangeTime(fileChangeTime); ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setSmbProperties(smbProperties), null, null).getValue(); ShareFileProperties destProperties = destClient.getProperties(); assertEquals(destProperties.getSmbProperties().getNtfsFileAttributes(), EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY)); assertNotNull(destProperties.getSmbProperties().getFileCreationTime()); assertNotNull(destProperties.getSmbProperties().getFileLastWriteTime()); FileShareTestHelper.compareDatesWithPrecision(destProperties.getSmbProperties().getFileChangeTime(), fileChangeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameMetadata() { primaryFileClient.create(512); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setMetadata(updatedMetadata), null, null); ShareFileClient renamedClient = resp.getValue(); ShareFileProperties getPropertiesAfter = renamedClient.getProperties(); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } 
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void renameTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient(); ShareFileClient primaryFileClient = rootDirectory.getFileClient(generatePathName() + "."); primaryFileClient.create(1024); Response<ShareFileClient> response = primaryFileClient .renameWithResponse(new ShareFileRenameOptions(generatePathName() + "."), null, null); FileShareTestHelper.assertResponseStatusCode(response, 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameError() { primaryFileClient = shareClient.getFileClient(generatePathName()); assertThrows(ShareStorageException.class, () -> primaryFileClient.rename(generatePathName())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameSourceAC() { primaryFileClient.create(512); String leaseID = setupFileLeaseCondition(primaryFileClient, RECEIVED_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(leaseID); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setSourceRequestConditions(src), null, null), 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameSourceACFail() { primaryFileClient.create(512); setupFileLeaseCondition(primaryFileClient, GARBAGE_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(GARBAGE_LEASE_ID); assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setSourceRequestConditions(src), null, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDestAC() { primaryFileClient.create(512); String pathName = 
generatePathName(); ShareFileClient destFile = shareClient.getFileClient(pathName); destFile.create(512); String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(leaseID); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse( new ShareFileRenameOptions(pathName).setDestinationRequestConditions(src).setReplaceIfExists(true), null, null), 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDestACFail() { primaryFileClient.create(512); String pathName = generatePathName(); ShareFileClient destFile = shareClient.getFileClient(pathName); destFile.create(512); setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(GARBAGE_LEASE_ID); assertThrows(RuntimeException.class, () -> destFile.renameWithResponse(new ShareFileRenameOptions(pathName) .setDestinationRequestConditions(src).setReplaceIfExists(true), null, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void renameContentType() { primaryFileClient.create(512); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setContentType("mytype"), null, null); ShareFileClient renamedClient = resp.getValue(); ShareFileProperties props = renamedClient.getProperties(); assertEquals(props.getContentType(), "mytype"); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); ShareFileClient fileClient = 
dirClient.getFileClient(generatePathName()); fileClient.create(512); String fileRename = generatePathName(); Response<ShareFileClient> resp = fileClient.renameWithResponse(new ShareFileRenameOptions(fileRename), null, null); ShareFileClient renamedClient = resp.getValue(); renamedClient.getProperties(); assertEquals(fileRename, renamedClient.getFilePath()); assertThrows(ShareStorageException.class, fileClient::getProperties); } @Test public void getSnapshotId() { String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString(); ShareFileClient shareSnapshotClient = fileBuilderHelper(shareName, filePath).snapshot(snapshot) .buildFileClient(); assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId()); } @Test public void getShareName() { assertEquals(shareName, primaryFileClient.getShareName()); } @Test public void getFilePath() { assertEquals(filePath, primaryFileClient.getFilePath()); } public void perCallPolicy() { primaryFileClient.create(512); ShareFileClient fileClient = fileBuilderHelper(primaryFileClient.getShareName(), primaryFileClient.getFilePath()).addPolicy(getPerCallVersionPolicy()).buildFileClient(); Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse(null, null); assertEquals(response.getHeaders().getValue(X_MS_VERSION), "2017-11-09"); } @Test public void defaultAudience() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(null) /* should default to "https: ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } @Test public void storageAccountAudience() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, 
fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(ShareAudience.createShareServiceAccountAudience(shareClient.getAccountName()))); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } @Test public void audienceError() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(ShareAudience.createShareServiceAccountAudience("badAudience"))); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); ShareStorageException e = assertThrows(ShareStorageException.class, aadFileClient::exists); assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode()); } @Test public void audienceFromString() { String url = String.format("https: ShareAudience audience = ShareAudience.fromString(url); String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(audience)); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } /* Uncomment this test when Client Name is enabled with STG 93. 
@PlaybackOnly @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-02-04") @Test public void listHandlesClientName() { ShareClient client = primaryFileServiceClient.getShareClient("testing"); ShareDirectoryClient directoryClient = client.getDirectoryClient("dir1"); ShareFileClient fileClient = directoryClient.getFileClient("test.txt"); List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList()); assertNotNull(list.get(0).getClientName()); } */ }
class FileApiTests extends FileShareTestBase { private ShareFileClient primaryFileClient; private ShareClient shareClient; private String shareName; private String filePath; private static Map<String, String> testMetadata; private static ShareFileHttpHeaders httpHeaders; private FileSmbProperties smbProperties; private static final String FILE_PERMISSION = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)S:NO_ACCESS_CONTROL"; @BeforeEach public void setup() { shareName = generateShareName(); filePath = generatePathName(); shareClient = shareBuilderHelper(shareName).buildClient(); shareClient.create(); primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); testMetadata = Collections.singletonMap("testmetadata", "value"); httpHeaders = new ShareFileHttpHeaders().setContentLanguage("en") .setContentType("application/octet-stream"); smbProperties = new FileSmbProperties() .setNtfsFileAttributes(EnumSet.<NtfsFileAttributes> of(NtfsFileAttributes.NORMAL)); } @Test public void getFileURL() { String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount() .getConnectionString()).getAccountName(); String expectURL = String.format("https: String fileURL = primaryFileClient.getFileUrl(); assertEquals(expectURL, fileURL); } @Test public void getShareSnapshotURL() { String accountName = StorageSharedKeyCredential.fromConnectionString(ENVIRONMENT.getPrimaryAccount() .getConnectionString()).getAccountName(); String expectURL = String.format("https: ShareSnapshotInfo shareSnapshotInfo = shareClient.createSnapshot(); expectURL = expectURL + "?sharesnapshot=" + shareSnapshotInfo.getSnapshot(); ShareFileClient newFileClient = shareBuilderHelper(shareName).snapshot(shareSnapshotInfo.getSnapshot()) .buildClient().getFileClient(filePath); String fileURL = newFileClient.getFileUrl(); 
assertEquals(expectURL, fileURL); String snapshotEndpoint = String.format("https: shareName, filePath, shareSnapshotInfo.getSnapshot()); ShareFileClient client = getFileClient(StorageSharedKeyCredential.fromConnectionString( ENVIRONMENT.getPrimaryAccount().getConnectionString()), snapshotEndpoint); assertEquals(client.getFileUrl(), snapshotEndpoint); } @Test public void exists() { primaryFileClient.create(Constants.KB); assertTrue(primaryFileClient.exists()); } @Test public void doesNotExist() { assertFalse(primaryFileClient.exists()); } @Test public void existsError() { primaryFileClient = fileBuilderHelper(shareName, filePath) .sasToken("sig=dummyToken").buildFileClient(); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.exists()); assertEquals(e.getResponse().getStatusCode(), 403); } @Test public void createFile() { FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(1024, null, null, null, null, null, null), 201); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void createFile4TB() { FileShareTestHelper.assertResponseStatusCode(primaryFileClient.createWithResponse(4 * Constants.TB, null, null, null, null, null, null), 201); } @Test public void createFileError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.create(-1)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @Test public void createFileWithArgsFpk() { String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFilePermissionKey(filePermissionKey); Response<ShareFileInfo> resp = primaryFileClient .createWithResponse(1024, httpHeaders, smbProperties, null, testMetadata, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 201); 
assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @Test public void createFileWithArgsFp() { smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileInfo> resp = primaryFileClient.createWithResponse(1024, httpHeaders, smbProperties, FILE_PERMISSION, testMetadata, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 201); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void createChangeTime() { OffsetDateTime changeTime = testResourceNamer.now(); primaryFileClient.createWithResponse(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null, null, null, null, null); 
FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void createFileOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); Response<ShareFileInfo> result = fileClient.createWithResponse(Constants.KB, null, null, null, null, null, null); assertEquals(fileClient.getShareName(), shareName); String[] filePath = fileClient.getFilePath().split("/"); assertEquals(fileName, filePath[1]); assertEquals(result.getValue().getETag(), result.getHeaders().getValue(HttpHeaderName.ETAG)); } @Test public void createFileWithArgsError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.createWithResponse(-1, null, null, null, testMetadata, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @ParameterizedTest @MethodSource("permissionAndKeySupplier") public void createFilePermissionAndKeyError(String filePermissionKey, String permission) { FileSmbProperties smbProperties = new FileSmbProperties().setFilePermissionKey(filePermissionKey); assertThrows(IllegalArgumentException.class, () -> primaryFileClient.createWithResponse(1024, null, smbProperties, permission, null, null, null)); } private static Stream<Arguments> permissionAndKeySupplier() { return Stream.of(Arguments.of("filePermissionKey", FILE_PERMISSION), Arguments.of(null, new String(FileShareTestHelper.getRandomBuffer(9 * Constants.KB)))); } @RequiredServiceVersion(clazz = 
ShareServiceVersion.class, min = "2022-11-02") @ParameterizedTest @ValueSource(booleans = {true, false}) public void createFileTrailingDot(boolean allowTrailingDot) { shareClient = getShareClient(shareName, allowTrailingDot, null); ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient(); String fileName = generatePathName(); String fileNameWithDot = fileName + "."; ShareFileClient fileClient = rootDirectory.getFileClient(fileNameWithDot); fileClient.create(1024); List<String> foundFiles = new ArrayList<>(); for (ShareFileItem fileRef : rootDirectory.listFilesAndDirectories()) { foundFiles.add(fileRef.getName()); } if (allowTrailingDot) { assertEquals(fileNameWithDot, foundFiles.get(0)); } else { assertEquals(fileName, foundFiles.get(0)); } } @Test public void uploadAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), 
stream.toByteArray()); } @Test public void uploadAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient .downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void uploadAndDownloadDataOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || 
downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new 
ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void parallelUploadInputStreamNoLength() { primaryFileClient.create(DATA.getDefaultDataSize()); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.download(os); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test public void parallelUploadInputStreamBadLength() { int[] lengths = new int[]{0, -100, DATA.getDefaultDataSize() - 1, DATA.getDefaultDataSize() + 1}; for (int length : lengths) { primaryFileClient.create(DATA.getDefaultDataSize()); assertThrows(Exception.class, () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), length), null, null)); } } @Test public void uploadSuccessfulRetry() { primaryFileClient.create(DATA.getDefaultDataSize()); ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new TransientFailureInjectingHttpPipelinePolicy()); clientWithFailure.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); ByteArrayOutputStream os = new ByteArrayOutputStream(); primaryFileClient.download(os); assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes()); } @Test 
public void uploadRangeAndDownloadData() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, null, null, null, null); ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders(); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206); assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong()); assertNotNull(headers.getETag()); assertNotNull(headers.getLastModified()); assertNotNull(headers.getFilePermissionKey()); assertNotNull(headers.getFileAttributes()); assertNotNull(headers.getFileLastWriteTime()); assertNotNull(headers.getFileCreationTime()); assertNotNull(headers.getFileChangeTime()); assertNotNull(headers.getFileParentId()); assertNotNull(headers.getFileId()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void uploadRangeAndDownloadDataWithArgs() { primaryFileClient.create(1024); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, DATA.getDefaultDataSizeLong()), true, null, null); FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201); FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206); assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), 
DATA.getDefaultDataSizeLong()); assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray()); } @Test public void downloadAllNull() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse response = primaryFileClient.downloadWithResponse(stream, null, null, null); byte[] body = stream.toByteArray(); ShareFileDownloadHeaders headers = response.getDeserializedHeaders(); assertArrayEquals(DATA.getDefaultBytes(), body); CoreUtils.isNullOrEmpty(headers.getMetadata()); assertNotNull(headers.getContentLength()); assertNotNull(headers.getContentType()); assertNull(headers.getContentMd5()); assertNull(headers.getContentEncoding()); assertNull(headers.getCacheControl()); assertNull(headers.getContentDisposition()); assertNull(headers.getContentLanguage()); } @ParameterizedTest @ValueSource(ints = {0, 1}) public void downloadEmptyFile(int fileSize) { primaryFileClient.create(fileSize); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); primaryFileClient.download(outStream); byte[] result = outStream.toByteArray(); assertEquals(result.length, fileSize); if (fileSize > 0) { assertEquals(0, result[0]); } } /* This is to test the appropriate integration of DownloadResponse, including setting the correct range values on HttpGetterInfo. */ @Test public void downloadWithRetryRange() { /* We are going to make a request for some range on a blob. The Flux returned will throw an exception, forcing a retry per the DownloadRetryOptions. The next request should have the same range header, which was generated from the count and offset values in HttpGetterInfo that was constructed on the initial call to download. We don't need to check the data here, but we want to ensure that the correct range is set each time. 
This will test the correction of a bug that was found which caused HttpGetterInfo to have an incorrect offset when it was constructed in FileClient.download(). */ primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileClient fc2 = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new MockRetryRangeResponsePolicy("bytes=2-6")); ShareFileRange range = new ShareFileRange(2, 6L); DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(3); RuntimeException e = assertThrows(RuntimeException.class, () -> fc2.downloadWithResponse(new ByteArrayOutputStream(), new ShareFileDownloadOptions() .setRange(range).setRetryOptions(options), null, null)); /* Because the dummy Flux always throws an error. This will also validate that an IllegalArgumentException is NOT thrown because the types would not match. */ assertInstanceOf(IOException.class, e.getCause()); } @Test public void downloadRetryDefault() { primaryFileClient.create(DATA.getDefaultDataSizeLong()); primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileClient failureClient = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(), primaryFileClient.getFileUrl(), new MockFailureResponsePolicy(5)); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); failureClient.download(outStream); String bodyStr = outStream.toString(); assertEquals(bodyStr, DATA.getDefaultText()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void downloadTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(DATA.getDefaultDataSizeLong()); shareFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize()); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); 
shareFileClient.download(outStream); String downloadedData = outStream.toString(); assertEquals(downloadedData, DATA.getDefaultText()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void downloadOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(DATA.getDefaultDataSizeLong()); fileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()); ShareFileProperties properties = fileClient.getProperties(); ByteArrayOutputStream stream = new ByteArrayOutputStream(); ShareFileDownloadResponse response = fileClient.downloadWithResponse(stream, null, null, null); byte[] body = stream.toByteArray(); ShareFileDownloadHeaders headers = response.getDeserializedHeaders(); assertArrayEquals(body, DATA.getDefaultBytes()); CoreUtils.isNullOrEmpty(headers.getMetadata()); assertEquals(headers.getContentLength(), properties.getContentLength()); assertEquals(headers.getContentType(), properties.getContentType()); assertEquals(headers.getContentMd5(), properties.getContentMd5()); assertEquals(headers.getContentEncoding(), properties.getContentEncoding()); assertEquals(headers.getCacheControl(), properties.getCacheControl()); assertEquals(headers.getContentDisposition(), properties.getContentDisposition()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void uploadRange4TB() { long fileSize = 4 * Constants.TB; primaryFileClient.create(fileSize); Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()) 
// NOTE(review): this region arrived line-collapsed; it has been re-wrapped for readability only —
// the token stream is unchanged.
// Tail of a test begun above this chunk: writes the final DATA.getDefaultDataSizeLong() bytes of
// the file, then range-downloads just that tail and verifies the length round-trips.
.setOffset(fileSize - DATA.getDefaultDataSizeLong()), null, null); /* Upload to end of file. */
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(stream,
    new ShareFileRange(fileSize - DATA.getDefaultDataSizeLong(), fileSize), true, null, null);
// 201 = range write accepted; 206 = partial-content download of the requested tail range.
FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
FileShareTestHelper.assertResponseStatusCode(downloadResponse, 206);
assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
}

// Buffered upload with payloads at/above 4 MB (per the method name, around the max put-range size)
// must complete without throwing — the client is expected to partition internally.
@ParameterizedTest
@ValueSource(longs = { 4 * Constants.MB, 5 * Constants.MB})
public void uploadBufferedRangeGreaterThanMaxPutRange(long length) {
    primaryFileClient.create(length);
    ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length));
    assertDoesNotThrow(() -> primaryFileClient.upload(data, length, null));
}

// Upload to a file whose name ends with '.' (trailing-dot support, service >= 2022-11-02).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void uploadRangeTrailingDot() {
    primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(DATA.getDefaultDataSizeLong());
    ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(),
        DATA.getDefaultDataSizeLong());
    Response<ShareFileUploadInfo> uploadResponse = primaryFileClient.uploadRangeWithResponse(options, null, null);
    ShareFileDownloadResponse downloadResponse = primaryFileClient.downloadWithResponse(
        new ByteArrayOutputStream(), null, null, null);
    FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
    FileShareTestHelper.assertResponseStatusCode(downloadResponse, 200);
    assertEquals(downloadResponse.getDeserializedHeaders().getContentLength(), DATA.getDefaultDataSizeLong());
}

// uploadRange through an OAuth (token-intent BACKUP) service client; verifies the payload and the
// full set of SMB response headers come back populated.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void uploadRangeOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    fileClient.create(DATA.getDefaultDataSizeLong());
    Response<ShareFileUploadInfo> uploadResponse = fileClient.uploadRangeWithResponse(
        new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()), null, null);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    ShareFileDownloadResponse downloadResponse = fileClient.downloadWithResponse(stream, null, null, null, null);
    ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
    FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
    // Either a full (200) or ranged (206) download is acceptable here.
    assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
    assertEquals(headers.getContentLength(), DATA.getDefaultDataSizeLong());
    assertNotNull(headers.getETag());
    assertNotNull(headers.getLastModified());
    assertNotNull(headers.getFilePermissionKey());
    assertNotNull(headers.getFileAttributes());
    assertNotNull(headers.getFileLastWriteTime());
    assertNotNull(headers.getFileCreationTime());
    assertNotNull(headers.getFileChangeTime());
    assertNotNull(headers.getFileParentId());
    assertNotNull(headers.getFileId());
    assertArrayEquals(DATA.getDefaultBytes(), stream.toByteArray());
}

// Buffered upload across the (total length, chunk length) combinations supplied below.
@ParameterizedTest
@MethodSource("bufferedUploadVariousPartitions")
public void bufferedUploadVariousPartitions(Long length, Long uploadChunkLength) {
    primaryFileClient.create(length);
    ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper
        .getRandomBuffer(Math.toIntExact(length)));
    assertNotNull(primaryFileClient.upload(data, length, new ParallelTransferOptions()
        .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength)));
}

// Argument supplier for bufferedUploadVariousPartitions; a null chunk length means "client default".
private static Stream<Arguments> bufferedUploadVariousPartitions() {
    return Stream.of(
        Arguments.of(1024L, null),
        Arguments.of(1024L, 1024L),
        Arguments.of(1024L, 256L),
        Arguments.of(4L * Constants.MB, null),
        Arguments.of(4L * Constants.MB, 1024L),
        Arguments.of(20L * Constants.MB, null),
        Arguments.of(20L * Constants.MB, 4L * Constants.MB)
    );
}

// A single 20 MB partition is rejected (per the method name, it exceeds the max put-range size).
@Test
public void bufferedUploadErrorPartitionTooBig() {
    long length = 20 * Constants.MB;
    long uploadChunkLength = 20 * Constants.MB;
    primaryFileClient.create(length);
    ByteArrayInputStream data = new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) length));
    assertThrows(Exception.class, () -> primaryFileClient.upload(data, length, new ParallelTransferOptions()
        .setBlockSizeLong(uploadChunkLength).setMaxSingleUploadSizeLong(uploadChunkLength)));
}

// uploadRangeWithResponse against a file that was never created -> 404 RESOURCE_NOT_FOUND.
@Test
public void uploadDataError() {
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
            DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1L), null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}

// Same 404 expectation via the parallel upload entry point.
@Test
public void parallelUploadDataError() {
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.upload(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}

// Same 404 expectation via the plain uploadRange entry point.
@Test
public void uploadRangeDataError() {
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}

// Injects a transient HTTP failure via a test pipeline policy; the upload is still expected to
// succeed (presumably via the pipeline's retry — the policy is project-local, behavior inferred
// from its name).
@Test
public void uploadDataRetryOnTransientFailure() {
    ShareFileClient clientWithFailure = getFileClient(ENVIRONMENT.getPrimaryAccount().getCredential(),
        primaryFileClient.getFileUrl(), new
TransientFailureInjectingHttpPipelinePolicy());
    primaryFileClient.create(1024);
    clientWithFailure.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    primaryFileClient.downloadWithResponse(os, new ShareFileRange(0, DATA.getDefaultDataSizeLong() - 1),
        null, null, null);
    assertArrayEquals(os.toByteArray(), DATA.getDefaultBytes());
}

// Uploads a known string, clears the first 7 bytes, and verifies bytes 0-6 read back as zeros.
@Test
public void uploadAndClearRange() {
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileClient.create(fullInfoString.length());
    primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
    primaryFileClient.clearRange(7);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    primaryFileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null);
    for (byte b : stream.toByteArray()) {
        assertEquals(0, b);
    }
}

// Same as above but clears 7 bytes starting at offset 1 and checks the range [1, 7].
@Test
public void uploadAndClearRangeWithArgs() {
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileClient.create(fullInfoString.length());
    primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
    primaryFileClient.clearRangeWithResponse(7, 1, null, null);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    primaryFileClient.downloadWithResponse(stream, new ShareFileRange(1, 7L), false, null, null);
    for (byte b : stream.toByteArray()) {
        assertEquals(0, b);
    }
}

// clearRange on a trailing-dot file name must return 201.
// NOTE(review): the arguments here pass the full data size first and 0 second — verify against
// clearRangeWithResponse's (length, offset) parameter order; it differs from the (7, 1) call above.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void clearRangeTrailingDot() {
    ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null);
    primaryFileClient.create(DATA.getDefaultDataSizeLong());
    FileShareTestHelper.assertResponseStatusCode(primaryFileClient.clearRangeWithResponse(
        DATA.getDefaultDataSizeLong(), 0, null, null), 201);
}

// upload + clearRange through an OAuth (token-intent BACKUP) client; cleared prefix reads as zeros.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void uploadAndClearRangeOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    fileClient.create(fullInfoString.length());
    fileClient.uploadRange(fullInfoData, fullInfoString.length());
    fileClient.clearRange(7);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    fileClient.downloadWithResponse(stream, new ShareFileRange(0, 6L), false, null, null);
    for (byte b : stream.toByteArray()) {
        assertEquals(0, b);
    }
}

// Clearing 30 bytes of a 22-byte file -> 416 INVALID_RANGE.
@Test
public void clearRangeError() {
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileClient.create(fullInfoString.length());
    primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
    ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.clearRange(30));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE);
}

// Out-of-bounds clear via the args overload -> 416 INVALID_RANGE.
@Test
public void clearRangeErrorArgs() {
    String fullInfoString = "please clear the range";
    InputStream fullInfoData = FileShareTestHelper.getInputStream(fullInfoString.getBytes(StandardCharsets.UTF_8));
    primaryFileClient.create(fullInfoString.length());
    primaryFileClient.uploadRange(fullInfoData, fullInfoString.length());
    ShareStorageException e = assertThrows(ShareStorageException.class, () ->
        primaryFileClient.clearRangeWithResponse(7, 20, null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 416, ShareErrorCode.INVALID_RANGE);
}

// Declared length != actual stream length -> UnexpectedLengthException with a "more/less than" message.
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadDataLengthMismatch(int size, String errMsg) {
    primaryFileClient.create(1024);
    UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
        () -> primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
            DATA.getDefaultInputStream(), size), null, Context.NONE));
    assertTrue(e.getMessage().contains(errMsg));
}

// Supplier for the three length-mismatch tests: declared 6 (stream longer) / 8 (stream shorter).
private static Stream<Arguments> uploadDataLengthMismatchSupplier() {
    return Stream.of(
        Arguments.of(6, "more than"),
        Arguments.of(8, "less than"));
}

// Same mismatch expectation via the parallel upload entry point.
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void parallelUploadDataLengthMismatch(int size, String errMsg) {
    primaryFileClient.create(1024);
    UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
        () -> primaryFileClient.upload(DATA.getDefaultInputStream(), size, null));
    assertTrue(e.getMessage().contains(errMsg));
}

// Same mismatch expectation via the plain uploadRange entry point.
@ParameterizedTest
@MethodSource("uploadDataLengthMismatchSupplier")
public void uploadRangeLengthMismatch(int size, String errMsg) {
    primaryFileClient.create(1024);
    UnexpectedLengthException e = assertThrows(UnexpectedLengthException.class,
        () -> primaryFileClient.uploadRange(DATA.getDefaultInputStream(), size));
    assertTrue(e.getMessage().contains(errMsg));
}

// Download of a never-created file -> 404 RESOURCE_NOT_FOUND.
@Test
public void downloadDataError() {
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> primaryFileClient.downloadWithResponse(new ByteArrayOutputStream(),
            new ShareFileRange(0, 1023L), false, null, null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}

// uploadFromFile with a missing local file -> UncheckedIOException wrapping NoSuchFileException.
@Test
public void uploadFileDoesNotExist() {
    File uploadFile = new File(testFolder.getPath() + "/fakefile.txt");
    if (uploadFile.exists()) {
        assert uploadFile.delete();
    }
    UncheckedIOException e
= assertThrows(UncheckedIOException.class, () -> primaryFileClient.uploadFromFile(uploadFile.getPath()));
    assertInstanceOf(NoSuchFileException.class, e.getCause());
    uploadFile.delete();
}

/* * Tests downloading a file using a default client that doesn't have a HttpClient passed to it. */
// Live-only round trip: uploadFromFile then downloadToFile at several sizes (0 .. 50 MB), comparing
// byte-for-byte; cleans up the share and both temp files afterwards.
@LiveOnly
@ParameterizedTest
@ValueSource(ints = { 0, 20, 16 * 1024 * 1024, 8 * 1026 * 1024 + 10, 50 * Constants.MB })
public void downloadFileBufferCopy(int fileSize) throws IOException {
    ShareServiceClient shareServiceClient = new ShareServiceClientBuilder()
        .connectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString())
        .buildClient();
    ShareFileClient fileClient = shareServiceClient.getShareClient(shareName)
        .createFile(filePath, fileSize);
    File file = FileShareTestHelper.getRandomFile(fileSize);
    fileClient.uploadFromFile(file.toPath().toString());
    File outFile = new File(generatePathName() + ".txt");
    if (outFile.exists()) {
        assertTrue(outFile.delete());
    }
    fileClient.downloadToFile(outFile.toPath().toString());
    assertTrue(FileShareTestHelper.compareFiles(file, outFile, 0, fileSize));
    shareServiceClient.deleteShare(shareName);
    outFile.delete();
    file.delete();
}

// downloadToFile refuses to overwrite an existing local file:
// UncheckedIOException wrapping FileAlreadyExistsException.
@Test
public void uploadAndDownloadFileExists() throws IOException {
    String data = "Download file exists";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (!downloadFile.exists()) {
        assertTrue(downloadFile.createNewFile());
    }
    primaryFileClient.create(data.length());
    primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)),
        data.length());
    UncheckedIOException e = assertThrows(UncheckedIOException.class,
        () -> primaryFileClient.downloadToFile(downloadFile.getPath()));
    assertInstanceOf(FileAlreadyExistsException.class, e.getCause());
    downloadFile.delete();
}

// downloadToFile creates the destination when it does not exist, and the content round-trips.
// NOTE(review): the guard below calls createNewFile() when the file ALREADY exists — for a
// "does not exist" test one would expect delete() or a negated condition; confirm against upstream.
@Test
public void uploadAndDownloadToFileDoesNotExist() throws IOException {
    String data = "Download file DoesNotExist";
    File downloadFile = new File(String.format("%s/%s.txt", testFolder.getPath(), prefix));
    if (downloadFile.exists()) {
        assertTrue(downloadFile.createNewFile());
    }
    primaryFileClient.create(data.length());
    primaryFileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes(StandardCharsets.UTF_8)),
        data.length());
    primaryFileClient.downloadToFile(downloadFile.getPath());
    Scanner scanner = new Scanner(downloadFile).useDelimiter("\\Z");
    assertEquals(data, scanner.next());
    scanner.close();
    FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), downloadFile.getName());
}

// FileLastWrittenMode.PRESERVE keeps the SMB last-write time across an uploadRange; NOW updates it.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void uploadRangePreserveFileLastWrittenOn() {
    FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE};
    for (FileLastWrittenMode mode : modes) {
        primaryFileClient.create(Constants.KB);
        ShareFileProperties initialProps = primaryFileClient.getProperties();
        primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(
            new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
            Constants.KB).setLastWrittenMode(mode), null, null);
        ShareFileProperties resultProps = primaryFileClient.getProperties();
        if (mode.equals(FileLastWrittenMode.PRESERVE)) {
            assertEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                .getFileLastWriteTime());
        } else {
            assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                .getFileLastWriteTime());
        }
    }
}

// Copies a 5-byte window from a source file into a destination file via uploadRangeFromUrl,
// authorized by a read-only file SAS; parameterized with a plain and a URL-encoding-sensitive suffix.
@Disabled("the groovy test was not testing this test properly. need to investigate this test further.")
@ParameterizedTest
@ValueSource(strings = { "", "ü1ü" /* Something that needs to be url encoded.
+ sasToken);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    client.download(stream);
    String result = new String(stream.toByteArray());
    // The copied window [destinationOffset, destinationOffset+length) must match the source window.
    for (int i = 0; i < length; i++) {
        assertEquals(result.charAt(destinationOffset + i), data.charAt(sourceOffset + i));
    }
}

// uploadRangeFromUrl driven through an OAuth-intent client (source authorized by a read SAS);
// verifies the 201 response, SMB headers, and that the copied first byte is 'u' (117).
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void uploadRangeFromURLOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClientSharedKey(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    String fileName = generatePathName();
    ShareFileClient fileClient = dirClient.getFileClient(fileName);
    fileClient.create(1024);
    String data = "The quick brown fox jumps over the lazy dog";
    int sourceOffset = 5;
    int length = 5;
    int destinationOffset = 0;
    fileClient.uploadRange(FileShareTestHelper.getInputStream(data.getBytes()), data.length());
    StorageSharedKeyCredential credential = StorageSharedKeyCredential.fromConnectionString(
        ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(fileClient.getShareName())
        .setFilePath(fileClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    String fileNameDest = generatePathName();
    ShareFileClient fileClientDest = dirClient.getFileClient(fileNameDest);
    fileClientDest.create(1024);
    Response<ShareFileUploadRangeFromUrlInfo> uploadResponse =
        fileClientDest.uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset,
            fileClient.getFileUrl() + "?" + sasToken, null, null);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    ShareFileDownloadResponse downloadResponse = fileClientDest.downloadWithResponse(stream, null, null,
        null, null);
    ShareFileDownloadHeaders headers = downloadResponse.getDeserializedHeaders();
    FileShareTestHelper.assertResponseStatusCode(uploadResponse, 201);
    assertTrue(downloadResponse.getStatusCode() == 200 || downloadResponse.getStatusCode() == 206);
    assertEquals(headers.getContentLength(), 1024);
    assertNotNull(headers.getETag());
    assertNotNull(headers.getLastModified());
    assertNotNull(headers.getFilePermissionKey());
    assertNotNull(headers.getFileAttributes());
    assertNotNull(headers.getFileLastWriteTime());
    assertNotNull(headers.getFileCreationTime());
    assertNotNull(headers.getFileChangeTime());
    assertNotNull(headers.getFileParentId());
    assertNotNull(headers.getFileId());
    // 117 == 'u': source window starts at offset 5 of "The quick ..." -> "uick ".
    assertEquals(stream.toByteArray()[0], 117);
}

// PRESERVE keeps the destination's SMB last-write time across uploadRangeFromUrl; NOW changes it.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void uploadRangeFromUrlPreserveFileLastWrittenOn() {
    FileLastWrittenMode[] modes = {FileLastWrittenMode.NOW, FileLastWrittenMode.PRESERVE};
    primaryFileClient.create(Constants.KB);
    ShareFileClient destinationClient = shareClient.getFileClient(generatePathName());
    destinationClient.create(Constants.KB);
    ShareFileProperties initialProps = destinationClient.getProperties();
    primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
        Constants.KB);
    StorageSharedKeyCredential credential = StorageSharedKeyCredential
        .fromConnectionString(ENVIRONMENT.getPrimaryAccount().getConnectionString());
    String sasToken = new ShareServiceSasSignatureValues()
        .setExpiryTime(testResourceNamer.now().plusDays(1))
        .setPermissions(new ShareFileSasPermission().setReadPermission(true))
        .setShareName(primaryFileClient.getShareName())
        .setFilePath(primaryFileClient.getFilePath())
        .generateSasQueryParameters(credential)
        .encode();
    for (FileLastWrittenMode mode : modes) {
        destinationClient.uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(Constants.KB,
            primaryFileClient.getFileUrl() + "?" + sasToken).setLastWrittenMode(mode), null, null);
        ShareFileProperties resultProps = destinationClient.getProperties();
        if (mode.equals(FileLastWrittenMode.PRESERVE)) {
            assertTrue(FileShareTestHelper.compareDatesWithPrecision(
                initialProps.getSmbProperties().getFileLastWriteTime(),
                resultProps.getSmbProperties().getFileLastWriteTime()));
        } else {
            assertNotEquals(initialProps.getSmbProperties().getFileLastWriteTime(), resultProps.getSmbProperties()
                .getFileLastWriteTime());
        }
    }
}

// uploadRangeFromUrl between two trailing-dot files on a share with trailing-dot support enabled,
// authorized by a share-level SAS; expects 201.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void uploadRangeFromUrlTrailingDot() {
    shareClient = getShareClient(shareName, true, true);
    ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient();
    ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + ".");
    sourceClient.create(Constants.KB);
    ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + ".");
    destinationClient.create(Constants.KB);
    sourceClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)),
        Constants.KB);
    ShareFileSasPermission permissions = new ShareFileSasPermission()
        .setReadPermission(true)
        .setWritePermission(true)
        .setCreatePermission(true)
        .setDeletePermission(true);
    OffsetDateTime expiryTime = testResourceNamer.now().plusDays(1);
    ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions);
    String sasToken = shareClient.generateSas(sasValues);
    Response<ShareFileUploadRangeFromUrlInfo> res = destinationClient.uploadRangeFromUrlWithResponse(
        new ShareFileUploadRangeFromUrlOptions(Constants.KB, sourceClient.getFileUrl() + "?"
+ sasToken), null, null);
    FileShareTestHelper.assertResponseStatusCode(res, 201);
}

// Same copy-from-URL between trailing-dot files but with trailing-dot support DISABLED on the
// destination and no SAS on the source URL — the call must fail with ShareStorageException.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void uploadRangeFromUrlTrailingDotFail() {
    shareClient = getShareClient(shareName, true, false);
    ShareDirectoryClient directoryClient = shareClient.getRootDirectoryClient();
    ShareFileClient sourceClient = directoryClient.getFileClient(generatePathName() + ".");
    sourceClient.create(DATA.getDefaultDataSizeLong());
    ShareFileClient destinationClient = directoryClient.getFileClient(generatePathName() + ".");
    destinationClient.create(DATA.getDefaultDataSizeLong());
    sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    assertThrows(ShareStorageException.class, () -> destinationClient
        .uploadRangeFromUrlWithResponse(new ShareFileUploadRangeFromUrlOptions(DATA.getDefaultDataSizeLong(),
            sourceClient.getFileUrl()), null, null));
}

// openInputStream over range [5, 10] must yield exactly 6 readable bytes (inclusive bounds).
@Test
public void openInputStreamWithRange() throws IOException {
    primaryFileClient.create(1024);
    ShareFileRange shareFileRange = new ShareFileRange(5L, 10L);
    byte[] dataBytes = "long test string".getBytes(StandardCharsets.UTF_8);
    ByteArrayInputStream inputStreamData = new ByteArrayInputStream(dataBytes);
    primaryFileClient.upload(inputStreamData, dataBytes.length, null);
    int totalBytesRead = 0;
    StorageFileInputStream stream = primaryFileClient.openInputStream(shareFileRange);
    while (stream.read() != -1) {
        totalBytesRead++;
    }
    stream.close();
    assertEquals(6, totalBytesRead);
}

// beginCopy from the file's own URL returns a poll response carrying a copy id; parameterized with
// a plain and a URL-encoding-sensitive path suffix.
@ParameterizedTest
@ValueSource(strings = { "", "ü1ü" /* Something that needs to be url encoded. */ })
public void startCopy(String pathSuffix) {
    primaryFileClient = fileBuilderHelper(shareName, filePath + pathSuffix).buildFileClient();
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL,
        new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
}

// beginCopy with explicit SMB properties / permission arguments; only checks a copy id is returned.
// NOTE(review): the @MethodSource value below is truncated in this copy of the file (the string
// literal is missing its '#supplierName")' tail) — restore it from the upstream source.
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithArgs(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
    boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    if (setFilePermissionKey) {
        smbProperties.setFilePermissionKey(filePermissionKey);
    }
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, smbProperties,
        setFilePermission ? FILE_PERMISSION : null, permissionType, ignoreReadOnly, setArchiveAttribute,
        null, null, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
}

// Copy between trailing-dot files with trailing-dot support enabled completes successfully.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void startCopyTrailingDot() {
    shareClient = getShareClient(shareName, true, true);
    ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + ".");
    sourceClient.create(1024);
    ShareFileClient destClient = shareClient.getFileClient(generatePathName() + ".");
    destClient.create(1024);
    byte[] data = FileShareTestHelper.getRandomBuffer(Constants.KB);
    ByteArrayInputStream inputStream = new ByteArrayInputStream(data);
    sourceClient.uploadRange(inputStream, Constants.KB);
    SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceClient.getFileUrl(),
        new ShareFileCopyOptions(), null);
    poller.waitForCompletion();
    assertEquals(poller.poll().getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}

// Same copy with trailing-dot support disabled -> 404 RESOURCE_NOT_FOUND.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02")
@Test
public void startCopyTrailingDotFail() {
    shareClient = getShareClient(shareName, true, false);
    ShareFileClient sourceClient = shareClient.getFileClient(generatePathName() + ".");
    sourceClient.create(1024);
    ShareFileClient destClient = shareClient.getFileClient(generatePathName() + ".");
    destClient.create(1024);
    sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSize());
    ShareStorageException e = assertThrows(ShareStorageException.class,
        () -> destClient.beginCopy(sourceClient.getFileUrl(), new ShareFileCopyOptions(), null));
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND);
}

// beginCopy with an invalid source URL must fail the poller with 400 INVALID_HEADER_VALUE.
@Disabled("There is a race condition in Poller where it misses the first observed event if there is a gap between"
    + " the time subscribed and the time we start observing events.")
@Test
public void startCopyError() {
primaryFileClient.create(1024);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy("some url", testMetadata, null);
    ShareStorageException e = assertThrows(ShareStorageException.class, poller::waitForCompletion);
    FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.INVALID_HEADER_VALUE);
}

// beginCopy via the ShareFileCopyOptions overload, exercising SMB properties, file permission,
// ignore-read-only, archive attribute, and permission copy mode; copy must complete successfully.
// NOTE(review): the @MethodSource value below is truncated in this copy of the file (the string
// literal is missing its '#supplierName")' tail) — restore it from the upstream source.
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptions(boolean setFilePermissionKey, boolean setFilePermission, boolean ignoreReadOnly,
    boolean setArchiveAttribute, PermissionCopyModeType permissionType) {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now());
    if (setFilePermissionKey) {
        smbProperties.setFilePermissionKey(filePermissionKey);
    }
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(setFilePermission ? FILE_PERMISSION : null)
        .setIgnoreReadOnly(ignoreReadOnly)
        .setArchiveAttribute(setArchiveAttribute)
        .setPermissionCopyModeType(permissionType);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}

// Copy with only ignoreReadOnly + archiveAttribute set; must complete with a copy id.
@Test
public void startCopyWithOptionsIgnoreReadOnlyAndSetArchive() {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setIgnoreReadOnly(true)
        .setArchiveAttribute(true);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}

// OVERRIDE permission copy mode with explicit SMB times/attributes; the copied file's SMB
// properties must match what was requested.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsFilePermission() {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
    smbProperties
        .setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setNtfsFileAttributes(ntfs);
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    // NOTE(review): these two compareDatesWithPrecision results are not asserted on — verify intent.
    FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
        smbProperties.getFileCreationTime());
    FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
        smbProperties.getFileLastWriteTime());
    assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}

// Copy with an explicit SMB file-change time; the destination must carry it after the copy.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsChangeTime() {
    ShareFileInfo client = primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    smbProperties.setFileChangeTime(testResourceNamer.now());
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    FileShareTestHelper.compareDatesWithPrecision(smbProperties.getFileChangeTime(),
        primaryFileClient.getProperties().getSmbProperties().getFileChangeTime());
}

// Copy driven by a created permission KEY (rather than a raw permission string) in OVERRIDE mode.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsCopySmbFilePropertiesPermissionKey() {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    String filePermissionKey = shareClient.createPermission(FILE_PERMISSION);
    EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
    smbProperties
        .setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setNtfsFileAttributes(ntfs)
        .setFilePermissionKey(filePermissionKey);
    ShareFileCopyOptions options = new
ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    FileSmbProperties properties = primaryFileClient.getProperties().getSmbProperties();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    FileShareTestHelper.compareDatesWithPrecision(properties.getFileCreationTime(),
        smbProperties.getFileCreationTime());
    FileShareTestHelper.compareDatesWithPrecision(properties.getFileLastWriteTime(),
        smbProperties.getFileLastWriteTime());
    assertEquals(properties.getNtfsFileAttributes(), smbProperties.getNtfsFileAttributes());
}

// Copy onto a leased destination succeeds when the matching lease id is supplied.
@Test
public void startCopyWithOptionLease() {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    String leaseId = createLeaseClient(primaryFileClient).acquireLease();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setDestinationRequestConditions(conditions);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}

// A random (non-matching) lease id on the destination conditions must fail the copy.
@Test
public void startCopyWithOptionsInvalidLease() {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    String leaseId = testResourceNamer.randomUuid();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setDestinationRequestConditions(conditions);
    assertThrows(ShareStorageException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null));
}

// Copy that attaches metadata to the destination; only completion and copy id are checked here.
@Test
public void startCopyWithOptionsMetadata() {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setMetadata(testMetadata);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
}

// CopyableFileSmbPropertiesList: copy the SOURCE's own created/lastWritten/changed/attributes to
// the destination and verify they survived the copy.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08")
@Test
public void startCopyWithOptionsWithOriginalSmbProperties() {
    primaryFileClient.create(1024);
    ShareFileProperties initialProperties = primaryFileClient.getProperties();
    OffsetDateTime creationTime = initialProperties.getSmbProperties().getFileCreationTime();
    OffsetDateTime lastWrittenTime = initialProperties.getSmbProperties().getFileLastWriteTime();
    OffsetDateTime changedTime = initialProperties.getSmbProperties().getFileChangeTime();
    EnumSet<NtfsFileAttributes> fileAttributes = initialProperties.getSmbProperties().getNtfsFileAttributes();
    String sourceURL = primaryFileClient.getFileUrl();
    String leaseId = createLeaseClient(primaryFileClient).acquireLease();
    ShareRequestConditions conditions = new ShareRequestConditions().setLeaseId(leaseId);
    CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
        .setCreatedOn(true)
        .setLastWrittenOn(true)
        .setChangedOn(true)
        .setFileAttributes(true);
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setDestinationRequestConditions(conditions)
        .setSmbPropertiesToCopy(list);
    SyncPoller<ShareFileCopyInfo, Void> poller = primaryFileClient.beginCopy(sourceURL, options, null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    ShareFileProperties resultProperties = primaryFileClient.getProperties();
    assertNotNull(pollResponse.getValue().getCopyId());
    assertEquals(pollResponse.getStatus(), LongRunningOperationStatus.SUCCESSFULLY_COMPLETED);
    FileShareTestHelper.compareDatesWithPrecision(creationTime, resultProperties.getSmbProperties()
        .getFileCreationTime());
    FileShareTestHelper.compareDatesWithPrecision(lastWrittenTime, resultProperties.getSmbProperties()
        .getFileLastWriteTime());
    FileShareTestHelper.compareDatesWithPrecision(changedTime, resultProperties.getSmbProperties()
        .getFileChangeTime());
    assertEquals(fileAttributes, resultProperties.getSmbProperties().getNtfsFileAttributes());
}

// Requesting both "copy from source" flags and explicit SMB values is contradictory and must throw
// IllegalArgumentException client-side.
// NOTE(review): the @MethodSource value below is truncated in this copy of the file (the string
// literal is missing its '#supplierName")' tail) — restore it from the upstream source.
@ParameterizedTest
@MethodSource("com.azure.storage.file.share.FileShareTestHelper
public void startCopyWithOptionsCopySourceFileError(boolean createdOn, boolean lastWrittenOn, boolean changedOn,
    boolean fileAttributes) {
    primaryFileClient.create(1024);
    String sourceURL = primaryFileClient.getFileUrl();
    EnumSet<NtfsFileAttributes> ntfs = EnumSet.of(NtfsFileAttributes.READ_ONLY, NtfsFileAttributes.ARCHIVE);
    CopyableFileSmbPropertiesList list = new CopyableFileSmbPropertiesList()
        .setCreatedOn(createdOn)
        .setLastWrittenOn(lastWrittenOn)
        .setChangedOn(changedOn)
        .setFileAttributes(fileAttributes);
    smbProperties.setFileCreationTime(testResourceNamer.now())
        .setFileLastWriteTime(testResourceNamer.now())
        .setFileChangeTime(testResourceNamer.now())
        .setNtfsFileAttributes(ntfs);
    ShareFileCopyOptions options = new ShareFileCopyOptions()
        .setSmbProperties(smbProperties)
        .setFilePermission(FILE_PERMISSION)
        .setPermissionCopyModeType(PermissionCopyModeType.OVERRIDE)
        .setSmbPropertiesToCopy(list);
    assertThrows(IllegalArgumentException.class, () -> primaryFileClient.beginCopy(sourceURL, options, null));
}

// beginCopy through an OAuth (token-intent BACKUP) client; checks only that a copy id is returned.
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10")
@Test
public void startCopyOAuth() {
    ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder()
        .shareTokenIntent(ShareTokenIntent.BACKUP));
    ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName)
        .getDirectoryClient(generatePathName());
    dirClient.create();
    ShareFileClient sourceClient = dirClient.getFileClient(generatePathName());
    sourceClient.create(DATA.getDefaultDataSizeLong());
    ShareFileClient destClient = dirClient.getFileClient(generatePathName());
    destClient.create(DATA.getDefaultDataSizeLong());
    sourceClient.uploadRange(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong());
    String sourceURL = sourceClient.getFileUrl();
    SyncPoller<ShareFileCopyInfo, Void> poller = sourceClient.beginCopy(sourceURL,
        new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse.getValue().getCopyId());
}

// Aborting an already-completed (same-account, small) copy is expected to fail server-side.
@Test
public void abortCopy() {
    int fileSize = Constants.MB;
    byte[] bytes = new byte[fileSize];
    ByteArrayInputStream data = new ByteArrayInputStream(bytes);
    ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    primaryFileClient.create(fileSize);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient();
    dest.create(fileSize);
    SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null);
    PollResponse<ShareFileCopyInfo> pollResponse = poller.poll();
    assertNotNull(pollResponse);
    assertNotNull(pollResponse.getValue());
    assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId()));
}

// Head of abortCopyLease — the method continues past the end of this chunk.
@Test
public void abortCopyLease() {
    int fileSize = Constants.MB;
    byte[] bytes = new byte[fileSize];
    ByteArrayInputStream data = new ByteArrayInputStream(bytes);
    ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient();
    primaryFileClient.create(fileSize);
    primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null);
    String sourceURL = primaryFileClient.getFileUrl();
    ShareFileClient dest =
fileBuilderHelper(shareName, filePath).buildFileClient(); dest.create(fileSize); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId); SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy( sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(), requestConditions, null, null)); } @Test public void abortCopyInvalidLease() { int fileSize = Constants.MB; byte[] bytes = new byte[fileSize]; ByteArrayInputStream data = new ByteArrayInputStream(bytes); ShareFileClient primaryFileClient = fileBuilderHelper(shareName, filePath).buildFileClient(); primaryFileClient.create(fileSize); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, filePath).buildFileClient(); dest.create(fileSize); String leaseId = testResourceNamer.randomUuid(); ShareRequestConditions requestConditions = new ShareRequestConditions().setLeaseId(leaseId); assertThrows(ShareStorageException.class, () -> { SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy( sourceURL, new ShareFileCopyOptions().setDestinationRequestConditions(requestConditions), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); dest.abortCopyWithResponse(pollResponse.getValue().getCopyId(), requestConditions, null, null); }); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void abortCopyTrailingDot() { ByteArrayInputStream data = new ByteArrayInputStream(new 
byte[Constants.MB]); String fileName = generatePathName() + "."; ShareFileClient primaryFileClient = getFileClient(shareName, fileName, true, null); primaryFileClient.create(Constants.MB); primaryFileClient.uploadWithResponse(new ShareFileUploadOptions(data), null, null); String sourceURL = primaryFileClient.getFileUrl(); ShareFileClient dest = fileBuilderHelper(shareName, fileName).buildFileClient(); dest.create(Constants.MB); SyncPoller<ShareFileCopyInfo, Void> poller = dest.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> dest.abortCopy(pollResponse.getValue().getCopyId())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void abortCopyOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient sourceClient = dirClient.getFileClient(fileName); sourceClient.create(DATA.getDefaultDataSizeLong()); sourceClient.uploadWithResponse(new ShareFileUploadOptions(DATA.getDefaultInputStream()), null, null); String sourceURL = sourceClient.getFileUrl(); ShareFileClient destClient = dirClient.getFileClient(generatePathName()); destClient.create(DATA.getDefaultDataSizeLong()); SyncPoller<ShareFileCopyInfo, Void> poller = destClient.beginCopy(sourceURL, new ShareFileCopyOptions(), null); PollResponse<ShareFileCopyInfo> pollResponse = poller.poll(); assertNotNull(pollResponse); assertNotNull(pollResponse.getValue()); assertThrows(ShareStorageException.class, () -> destClient.abortCopy(pollResponse.getValue().getCopyId())); } @Test public void abortCopyError() { 
assertThrows(ShareStorageException.class, () -> primaryFileClient.abortCopy("randomId")); } @Test public void deleteFile() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteWithResponse(null, null), 202); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void deleteFileTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); FileShareTestHelper.assertResponseStatusCode(shareFileClient.deleteWithResponse(null, null), 202); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void deleteFileOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); String fileName = generatePathName(); ShareFileClient fileClient = dirClient.getFileClient(fileName); fileClient.create(Constants.KB); FileShareTestHelper.assertResponseStatusCode(fileClient.deleteWithResponse(null, null), 202); } @Test public void deleteFileError() { ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.deleteWithResponse(null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 404, ShareErrorCode.RESOURCE_NOT_FOUND); } @Test public void deleteIfExistsFile() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.deleteIfExistsWithResponse(null, null, null), 202); } @Test public void deleteIfExistsFileMin() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); primaryFileClient.deleteIfExists(); } @Test public void 
deleteIfExistsFileThatDoesNotExist() { ShareFileClient client = shareClient.getFileClient(generateShareName()); Response<Boolean> response = client.deleteIfExistsWithResponse(null, null, null); assertFalse(response.getValue()); FileShareTestHelper.assertResponseStatusCode(response, 404); assertFalse(client.exists()); } @Test public void deleteIfExistsFileThatWasAlreadyDeleted() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); assertTrue(primaryFileClient.deleteIfExists()); assertFalse(primaryFileClient.deleteIfExists()); } @Test public void getProperties() { primaryFileClient.create(1024); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileProperties> resp = primaryFileClient.getPropertiesWithResponse(null, null); FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void getPropertiesTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileProperties> resp = shareFileClient.getPropertiesWithResponse(null, null); 
FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void getPropertiesOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); ShareFileInfo createInfo = fileClient.create(Constants.KB); ShareFileProperties properties = fileClient.getProperties(); assertEquals(createInfo.getETag(), properties.getETag()); assertEquals(createInfo.getLastModified(), properties.getLastModified()); assertEquals(createInfo.getSmbProperties().getFilePermissionKey(), properties.getSmbProperties().getFilePermissionKey()); assertEquals(createInfo.getSmbProperties().getNtfsFileAttributes(), properties.getSmbProperties().getNtfsFileAttributes()); assertEquals(createInfo.getSmbProperties().getFileLastWriteTime(), properties.getSmbProperties().getFileLastWriteTime()); assertEquals(createInfo.getSmbProperties().getFileCreationTime(), properties.getSmbProperties().getFileCreationTime()); assertEquals(createInfo.getSmbProperties().getFileChangeTime(), 
properties.getSmbProperties().getFileChangeTime()); assertEquals(createInfo.getSmbProperties().getParentId(), properties.getSmbProperties().getParentId()); assertEquals(createInfo.getSmbProperties().getFileId(), properties.getSmbProperties().getFileId()); } @Test public void getPropertiesError() { ShareStorageException ex = assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties()); assertTrue(ex.getMessage().contains("ResourceNotFound")); } @Test public void setHttpHeadersFpk() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); String filePermissionKey = shareClient.createPermission(FILE_PERMISSION); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()) .setFilePermissionKey(filePermissionKey); Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, null, null, null); FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @Test public void setHttpHeadersFp() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); smbProperties.setFileCreationTime(testResourceNamer.now()) .setFileLastWriteTime(testResourceNamer.now()); Response<ShareFileInfo> resp = primaryFileClient.setPropertiesWithResponse(512, httpHeaders, smbProperties, FILE_PERMISSION, null, null); 
FileShareTestHelper.assertResponseStatusCode(resp, 200); assertNotNull(resp.getValue().getETag()); assertNotNull(resp.getValue().getLastModified()); assertNotNull(resp.getValue().getSmbProperties()); assertNotNull(resp.getValue().getSmbProperties().getFilePermissionKey()); assertNotNull(resp.getValue().getSmbProperties().getNtfsFileAttributes()); assertNotNull(resp.getValue().getSmbProperties().getFileLastWriteTime()); assertNotNull(resp.getValue().getSmbProperties().getFileCreationTime()); assertNotNull(resp.getValue().getSmbProperties().getFileChangeTime()); assertNotNull(resp.getValue().getSmbProperties().getParentId()); assertNotNull(resp.getValue().getSmbProperties().getFileId()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void setHttpHeadersChangeTime() { primaryFileClient.create(512); OffsetDateTime changeTime = testResourceNamer.now(); primaryFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null); FileShareTestHelper.compareDatesWithPrecision(primaryFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void setHttpHeadersTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.create(1024); OffsetDateTime changeTime = testResourceNamer.now(); shareFileClient.setProperties(512, null, new FileSmbProperties().setFileChangeTime(changeTime), null); FileShareTestHelper.compareDatesWithPrecision(shareFileClient.getProperties().getSmbProperties() .getFileChangeTime(), changeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void setHttpHeadersOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); 
ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); httpHeaders = new ShareFileHttpHeaders() .setContentType("application/octet-stream") .setContentDisposition("attachment") .setCacheControl("no-transform") .setContentEncoding("gzip") .setContentLanguage("en"); Response<ShareFileInfo> res = fileClient.setPropertiesWithResponse(Constants.KB, httpHeaders, null, null, null, null); ShareFileProperties properties = fileClient.getProperties(); FileShareTestHelper.assertResponseStatusCode(res, 200); assertNotNull(res.getValue().getETag()); assertEquals(res.getValue().getETag(), res.getHeaders().getValue(HttpHeaderName.ETAG)); assertEquals(properties.getContentType(), "application/octet-stream"); assertEquals(properties.getContentDisposition(), "attachment"); assertEquals(properties.getCacheControl(), "no-transform"); assertEquals(properties.getContentEncoding(), "gzip"); assertNull(properties.getContentMd5()); } @Test public void setHttpHeadersError() { primaryFileClient.createWithResponse(1024, null, null, null, null, null, null); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.setPropertiesWithResponse(-1, null, null, null, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.OUT_OF_RANGE_INPUT); } @Test public void setMetadata() { primaryFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileProperties getPropertiesBefore = primaryFileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = primaryFileClient .setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = primaryFileClient.getProperties(); assertEquals(testMetadata, 
getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void setMetadataTrailingDot() { ShareFileClient shareFileClient = getFileClient(shareName, generatePathName() + ".", true, null); shareFileClient.createWithResponse(1024, httpHeaders, null, null, testMetadata, null, null); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileProperties getPropertiesBefore = shareFileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = shareFileClient.setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = shareFileClient.getProperties(); assertEquals(testMetadata, getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void setMetadataOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.createWithResponse(Constants.KB, null, null, null, testMetadata, null, null); ShareFileProperties getPropertiesBefore = fileClient.getProperties(); Response<ShareFileMetadataInfo> setPropertiesResponse = fileClient.setMetadataWithResponse(updatedMetadata, null, null); ShareFileProperties getPropertiesAfter = fileClient.getProperties(); assertEquals(testMetadata, 
getPropertiesBefore.getMetadata()); FileShareTestHelper.assertResponseStatusCode(setPropertiesResponse, 200); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } @Test public void setMetadataError() { primaryFileClient.create(1024); Map<String, String> errorMetadata = Collections.singletonMap("", "value"); ShareStorageException e = assertThrows(ShareStorageException.class, () -> primaryFileClient.setMetadataWithResponse(errorMetadata, null, null)); FileShareTestHelper.assertExceptionStatusCodeAndMessage(e, 400, ShareErrorCode.EMPTY_METADATA_KEY); } @Test public void listRanges() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listRangesTrailingDot() throws IOException { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(1024); String fileName = generatePathName() + "."; String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesWithRange() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient.listRanges(new ShareFileRange(0, 
511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesSnapshot() throws IOException { String fileName = generatePathName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient = fileBuilderHelper(shareName, filePath) .snapshot(snapInfo.getSnapshot()) .buildFileClient(); primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesSnapshotFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); primaryFileClient = fileBuilderHelper(shareName, filePath) .snapshot("2020-08-07T16:58:02.0000000Z") .buildFileClient(); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRanges(new ShareFileRange(0, 511L), null, null).forEach(it -> { assertEquals(0, it.getStart()); assertEquals(511, it.getEnd()); })); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listRangesOAuth() throws IOException { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); String fileName = generatePathName(); ShareFileClient 
fileClient = dirClient.getFileClient(fileName); fileClient.create(Constants.KB); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); fileClient.uploadFromFile(uploadFile); fileClient.listRanges().forEach(it -> { assertEquals(0, it.getStart()); assertEquals(1023, it.getEnd()); }); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @ParameterizedTest @MethodSource("com.azure.storage.file.share.FileShareTestHelper public void listRangesDiff(List<FileRange> rangesToUpdate, List<FileRange> rangesToClear, List<FileRange> expectedRanges, List<ClearRange> expectedClearRanges) { primaryFileClient.create(4 * Constants.MB); primaryFileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(4 * Constants.MB)), 4 * Constants.MB); String snapshotId = primaryFileServiceClient.getShareClient(primaryFileClient.getShareName()) .createSnapshot() .getSnapshot(); rangesToUpdate.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; primaryFileClient.uploadRangeWithResponse( new ShareFileUploadRangeOptions(new ByteArrayInputStream( FileShareTestHelper.getRandomBuffer((int) size)), size).setOffset(it.getStart()), null, null); }); rangesToClear.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; primaryFileClient.clearRangeWithResponse(size, it.getStart(), null, null); }); ShareFileRangeList rangeDiff = primaryFileClient.listRangesDiff(snapshotId); assertEquals(expectedRanges.size(), rangeDiff.getRanges().size()); assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size()); for (int i = 0; i < expectedRanges.size(); i++) { FileRange actualRange = rangeDiff.getRanges().get(i); FileRange expectedRange = expectedRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } for (int i = 0; i < 
expectedClearRanges.size(); i++) { ClearRange actualRange = rangeDiff.getClearRanges().get(i); ClearRange expectedRange = expectedClearRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listRangesDiffOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); fileClient.uploadRange(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer(Constants.KB)), Constants.KB); String snapshotId = primaryFileServiceClient.getShareClient(fileClient.getShareName()) .createSnapshot() .getSnapshot(); List<FileRange> rangesToUpdate = FileShareTestHelper.createFileRanges(); List<FileRange> rangesToClear = FileShareTestHelper.createFileRanges(); List<FileRange> expectedRanges = FileShareTestHelper.createFileRanges(); List<FileRange> expectedClearRanges = FileShareTestHelper.createFileRanges(); rangesToUpdate.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; fileClient.uploadWithResponse(new ByteArrayInputStream(FileShareTestHelper.getRandomBuffer((int) size)), size, it.getStart(), null, null); }); rangesToClear.forEach(it -> { long size = it.getEnd() - it.getStart() + 1; fileClient.clearRangeWithResponse(size, it.getStart(), null, null); }); ShareFileRangeList rangeDiff = fileClient.listRangesDiff(snapshotId); assertEquals(expectedRanges.size(), rangeDiff.getRanges().size()); assertEquals(expectedClearRanges.size(), rangeDiff.getClearRanges().size()); for (int i = 0; i < expectedRanges.size(); i++) { FileRange actualRange = rangeDiff.getRanges().get(i); 
FileRange expectedRange = expectedRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } for (int i = 0; i < expectedClearRanges.size(); i++) { ClearRange actualRange = rangeDiff.getClearRanges().get(i); FileRange expectedRange = expectedClearRanges.get(i); assertEquals(expectedRange.getStart(), actualRange.getStart()); assertEquals(expectedRange.getEnd(), actualRange.getEnd()); } } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void listRangesDiffWithRange() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025, 1026L)), null, null).getValue().getRanges().get(0); assertEquals(1025, range.getStart()); assertEquals(1026, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2020-02-10") @Test public void listRangesDiffLease() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadRangeWithResponse(new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), 
DATA.getDefaultDataSizeLong()).setOffset(1024L), null, null); String leaseId = createLeaseClient(primaryFileClient).acquireLease(); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()) .setRequestConditions(new ShareRequestConditions().setLeaseId(leaseId)), null, null) .getValue().getRanges().get(0); assertEquals(1024, range.getStart()); assertEquals(1030, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listRangesDiffTrailingDot() throws IOException { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); String fileNameWithDot = generateShareName() + "."; primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileNameWithDot); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo snapInfo = shareClient.createSnapshot(); ShareFileUploadRangeOptions options = new ShareFileUploadRangeOptions(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong()).setOffset(1024L); primaryFileClient.uploadRangeWithResponse(options, null, null); FileRange range = primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()).setRange(new ShareFileRange(1025L, 1026L)), null, null).getValue().getRanges().get(0); assertEquals(1025, range.getStart()); assertEquals(1026, range.getEnd()); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileNameWithDot); } @Test public void listRangesDiffLeaseFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024 + DATA.getDefaultDataSizeLong()); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); ShareSnapshotInfo 
snapInfo = shareClient.createSnapshot(); primaryFileClient.uploadWithResponse(DATA.getDefaultInputStream(), DATA.getDefaultDataSizeLong(), 1024L, null, null); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions(snapInfo.getSnapshot()) .setRequestConditions(new ShareRequestConditions() .setLeaseId(testResourceNamer.randomUuid())), null, null).getValue().getRanges().get(0)); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listRangesDiffFail() throws IOException { String fileName = generateShareName(); primaryFileClient.create(1024); String uploadFile = FileShareTestHelper.createRandomFileWithLength(1024, testFolder, fileName); primaryFileClient.uploadFromFile(uploadFile); assertThrows(ShareStorageException.class, () -> primaryFileClient.listRangesDiffWithResponse( new ShareFileListRangesDiffOptions("2020-08-07T16:58:02.0000000Z"), null, null).getValue().getRanges() .get(0)); FileShareTestHelper.deleteFileIfExists(testFolder.getPath(), fileName); } @Test public void listHandles() { primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles().stream().count()); } @Test public void listHandlesWithMaxResult() { primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles(2, null, null).stream().count()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void listHandlesTrailingDot() { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(1024); assertEquals(0, primaryFileClient.listHandles().stream().count()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void listHandlesOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder().shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = 
oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(Constants.KB); assertEquals(0, fileClient.listHandles().stream().count()); } @PlaybackOnly @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03") @Test public void listHandlesAccessRights() { ShareClient shareClient = primaryFileServiceClient.getShareClient("myshare"); ShareDirectoryClient directoryClient = shareClient.getDirectoryClient("mydirectory"); ShareFileClient fileClient = directoryClient.getFileClient("myfile"); List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList()); assertEquals(list.get(0).getAccessRights().get(0), ShareFileHandleAccessRights.WRITE); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2023-01-03") @Test public void forceCloseHandleMin() { primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @Test public void forceCloseHandleInvalidHandleID() { primaryFileClient.create(512); assertThrows(ShareStorageException.class, () -> primaryFileClient.forceCloseHandle("invalidHandleId")); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void forceCloseHandleTrailingDot() { ShareFileClient primaryFileClient = getFileClient(shareName, generatePathName() + ".", true, null); primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void forceCloseHandleOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new 
ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName) .getDirectoryClient(generatePathName()); dirClient.create(); ShareFileClient fileClient = dirClient.getFileClient(generatePathName()); fileClient.create(512); CloseHandlesInfo handlesClosedInfo = fileClient.forceCloseHandle("1"); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2019-07-07") @Test public void forceCloseAllHandlesMin() { primaryFileClient.create(512); CloseHandlesInfo handlesClosedInfo = primaryFileClient.forceCloseAllHandles(null, null); assertEquals(0, handlesClosedInfo.getClosedHandles()); assertEquals(0, handlesClosedInfo.getFailedHandles()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameMin() { primaryFileClient.create(512); assertNotNull(primaryFileClient.rename(generatePathName())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(strings = {"\u200B", "\u200C", "\u200D", "\uFEFF"}) @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameWithResponse() { primaryFileClient.create(512); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()), null, null); ShareFileClient renamedClient = resp.getValue(); assertNotNull(renamedClient.getProperties()); assertThrows(ShareStorageException.class, () -> primaryFileClient.getProperties()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-02-12") @Test public void renameSasToken() { ShareFileSasPermission permissions = new ShareFileSasPermission() .setReadPermission(true) .setWritePermission(true) .setCreatePermission(true) .setDeletePermission(true); OffsetDateTime expiryTime = 
testResourceNamer.now().plusDays(1); ShareServiceSasSignatureValues sasValues = new ShareServiceSasSignatureValues(expiryTime, permissions); String sas = shareClient.generateSas(sasValues); ShareFileClient client = getFileClient(sas, primaryFileClient.getFileUrl()); primaryFileClient.create(1024); String fileName = generatePathName(); ShareFileClient destClient = client.rename(fileName); assertNotNull(destClient.getProperties()); assertEquals(fileName, destClient.getFilePath()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDifferentDirectory() { primaryFileClient.create(512); ShareDirectoryClient dc = shareClient.getDirectoryClient(generatePathName()); dc.create(); ShareFileClient destinationPath = dc.getFileClient(generatePathName()); ShareFileClient resultClient = primaryFileClient.rename(destinationPath.getFilePath()); assertTrue(destinationPath.exists()); assertEquals(destinationPath.getFilePath(), resultClient.getFilePath()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(booleans = {true, false}) public void renameReplaceIfExists(boolean replaceIfExists) { primaryFileClient.create(512); ShareFileClient destination = shareClient.getFileClient(generatePathName()); destination.create(512); boolean exception = false; try { primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destination.getFilePath()) .setReplaceIfExists(replaceIfExists), null, null); } catch (ShareStorageException ignored) { exception = true; } assertEquals(replaceIfExists, !exception); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @ParameterizedTest @ValueSource(booleans = {true, false}) public void renameIgnoreReadOnly(boolean ignoreReadOnly) { primaryFileClient.create(512); FileSmbProperties props = new FileSmbProperties() .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.READ_ONLY)); ShareFileClient destinationFile = 
shareClient.getFileClient(generatePathName()); destinationFile.createWithResponse(512L, null, props, null, null, null, null, null); boolean exception = false; try { primaryFileClient.renameWithResponse(new ShareFileRenameOptions(destinationFile.getFilePath()) .setIgnoreReadOnly(ignoreReadOnly).setReplaceIfExists(true), null, null); } catch (ShareStorageException ignored) { exception = true; } assertEquals(exception, !ignoreReadOnly); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameFilePermission() { primaryFileClient.create(512); String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setFilePermission(filePermission), null, null).getValue(); assertNotNull(destClient.getProperties().getSmbProperties().getFilePermissionKey()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameFilePermissionAndKeySet() { primaryFileClient.create(512); String filePermission = "O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()) .setFilePermission(filePermission) .setSmbProperties(new FileSmbProperties().setFilePermissionKey("filePermissionkey")), null, null) .getValue()); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void renameFileSmbProperties() { primaryFileClient.create(512); String filePermission = 
"O:S-1-5-21-2127521184-1604012920-1887927527-21560751G:S-1-5-21-2127521184-1604012920-1887927527-513D:AI(A;;FA;;;SY)(A;;FA;;;BA)(A;;0x1200a9;;;S-1-5-21-397955417-626881126-188441444-3053964)"; String permissionKey = shareClient.createPermission(filePermission); OffsetDateTime fileCreationTime = testResourceNamer.now().minusDays(5); OffsetDateTime fileLastWriteTime = testResourceNamer.now().minusYears(2); OffsetDateTime fileChangeTime = testResourceNamer.now(); FileSmbProperties smbProperties = new FileSmbProperties() .setFilePermissionKey(permissionKey) .setNtfsFileAttributes(EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY)) .setFileCreationTime(fileCreationTime) .setFileLastWriteTime(fileLastWriteTime) .setFileChangeTime(fileChangeTime); ShareFileClient destClient = primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setSmbProperties(smbProperties), null, null).getValue(); ShareFileProperties destProperties = destClient.getProperties(); assertEquals(destProperties.getSmbProperties().getNtfsFileAttributes(), EnumSet.of(NtfsFileAttributes.ARCHIVE, NtfsFileAttributes.READ_ONLY)); assertNotNull(destProperties.getSmbProperties().getFileCreationTime()); assertNotNull(destProperties.getSmbProperties().getFileLastWriteTime()); FileShareTestHelper.compareDatesWithPrecision(destProperties.getSmbProperties().getFileChangeTime(), fileChangeTime); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameMetadata() { primaryFileClient.create(512); Map<String, String> updatedMetadata = Collections.singletonMap("update", "value"); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setMetadata(updatedMetadata), null, null); ShareFileClient renamedClient = resp.getValue(); ShareFileProperties getPropertiesAfter = renamedClient.getProperties(); assertEquals(updatedMetadata, getPropertiesAfter.getMetadata()); } 
@RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2022-11-02") @Test public void renameTrailingDot() { shareClient = getShareClient(shareName, true, true); ShareDirectoryClient rootDirectory = shareClient.getRootDirectoryClient(); ShareFileClient primaryFileClient = rootDirectory.getFileClient(generatePathName() + "."); primaryFileClient.create(1024); Response<ShareFileClient> response = primaryFileClient .renameWithResponse(new ShareFileRenameOptions(generatePathName() + "."), null, null); FileShareTestHelper.assertResponseStatusCode(response, 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameError() { primaryFileClient = shareClient.getFileClient(generatePathName()); assertThrows(ShareStorageException.class, () -> primaryFileClient.rename(generatePathName())); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameSourceAC() { primaryFileClient.create(512); String leaseID = setupFileLeaseCondition(primaryFileClient, RECEIVED_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(leaseID); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setSourceRequestConditions(src), null, null), 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameSourceACFail() { primaryFileClient.create(512); setupFileLeaseCondition(primaryFileClient, GARBAGE_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(GARBAGE_LEASE_ID); assertThrows(ShareStorageException.class, () -> primaryFileClient.renameWithResponse(new ShareFileRenameOptions(generatePathName()) .setSourceRequestConditions(src), null, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDestAC() { primaryFileClient.create(512); String pathName = 
generatePathName(); ShareFileClient destFile = shareClient.getFileClient(pathName); destFile.create(512); String leaseID = setupFileLeaseCondition(destFile, RECEIVED_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(leaseID); FileShareTestHelper.assertResponseStatusCode(primaryFileClient.renameWithResponse( new ShareFileRenameOptions(pathName).setDestinationRequestConditions(src).setReplaceIfExists(true), null, null), 200); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameDestACFail() { primaryFileClient.create(512); String pathName = generatePathName(); ShareFileClient destFile = shareClient.getFileClient(pathName); destFile.create(512); setupFileLeaseCondition(destFile, GARBAGE_LEASE_ID); ShareRequestConditions src = new ShareRequestConditions() .setLeaseId(GARBAGE_LEASE_ID); assertThrows(RuntimeException.class, () -> destFile.renameWithResponse(new ShareFileRenameOptions(pathName) .setDestinationRequestConditions(src).setReplaceIfExists(true), null, null)); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-06-08") @Test public void renameContentType() { primaryFileClient.create(512); Response<ShareFileClient> resp = primaryFileClient.renameWithResponse( new ShareFileRenameOptions(generatePathName()).setContentType("mytype"), null, null); ShareFileClient renamedClient = resp.getValue(); ShareFileProperties props = renamedClient.getProperties(); assertEquals(props.getContentType(), "mytype"); } @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2021-04-10") @Test public void renameOAuth() { ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP)); String dirName = generatePathName(); ShareDirectoryClient dirClient = oAuthServiceClient.getShareClient(shareName).getDirectoryClient(dirName); dirClient.create(); ShareFileClient fileClient = 
dirClient.getFileClient(generatePathName()); fileClient.create(512); String fileRename = generatePathName(); Response<ShareFileClient> resp = fileClient.renameWithResponse(new ShareFileRenameOptions(fileRename), null, null); ShareFileClient renamedClient = resp.getValue(); renamedClient.getProperties(); assertEquals(fileRename, renamedClient.getFilePath()); assertThrows(ShareStorageException.class, fileClient::getProperties); } @Test public void getSnapshotId() { String snapshot = OffsetDateTime.of(LocalDateTime.of(2000, 1, 1, 1, 1), ZoneOffset.UTC).toString(); ShareFileClient shareSnapshotClient = fileBuilderHelper(shareName, filePath).snapshot(snapshot) .buildFileClient(); assertEquals(snapshot, shareSnapshotClient.getShareSnapshotId()); } @Test public void getShareName() { assertEquals(shareName, primaryFileClient.getShareName()); } @Test public void getFilePath() { assertEquals(filePath, primaryFileClient.getFilePath()); } public void perCallPolicy() { primaryFileClient.create(512); ShareFileClient fileClient = fileBuilderHelper(primaryFileClient.getShareName(), primaryFileClient.getFilePath()).addPolicy(getPerCallVersionPolicy()).buildFileClient(); Response<ShareFileProperties> response = fileClient.getPropertiesWithResponse(null, null); assertEquals(response.getHeaders().getValue(X_MS_VERSION), "2017-11-09"); } @Test public void defaultAudience() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(null) /* should default to "https: ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } @Test public void storageAccountAudience() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, 
fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(ShareAudience.createShareServiceAccountAudience(shareClient.getAccountName()))); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } @Test public void audienceError() { String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(ShareAudience.createShareServiceAccountAudience("badAudience"))); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); ShareStorageException e = assertThrows(ShareStorageException.class, aadFileClient::exists); assertEquals(ShareErrorCode.AUTHENTICATION_FAILED, e.getErrorCode()); } @Test public void audienceFromString() { String url = String.format("https: ShareAudience audience = ShareAudience.fromString(url); String fileName = generatePathName(); ShareFileClient fileClient = fileBuilderHelper(shareName, fileName).buildFileClient(); fileClient.create(Constants.KB); ShareServiceClient oAuthServiceClient = getOAuthServiceClient(new ShareServiceClientBuilder() .shareTokenIntent(ShareTokenIntent.BACKUP) .audience(audience)); ShareFileClient aadFileClient = oAuthServiceClient.getShareClient(shareName).getFileClient(fileName); assertTrue(aadFileClient.exists()); } /* Uncomment this test when Client Name is enabled with STG 93. 
@PlaybackOnly @RequiredServiceVersion(clazz = ShareServiceVersion.class, min = "2024-02-04") @Test public void listHandlesClientName() { ShareClient client = primaryFileServiceClient.getShareClient("testing"); ShareDirectoryClient directoryClient = client.getDirectoryClient("dir1"); ShareFileClient fileClient = directoryClient.getFileClient("test.txt"); List<HandleItem> list = fileClient.listHandles().stream().collect(Collectors.toList()); assertNotNull(list.get(0).getClientName()); } */ }
See my comment about the schema.
void onOffMapTest() { assertTrue(serverSideProperties.getOnOff().get("Gamma")); }
assertTrue(serverSideProperties.getOnOff().get("Gamma"));
void onOffMapTest() { assertTrue(serverSideProperties.getOnOff().get("Gamma")); }
class ServerSideFeatureManagementPropertiesTest { @Autowired private FeatureManagementProperties serverSideProperties; @Test @Test void featureManagementTest() { final Feature alphaFeatureItem = serverSideProperties.getFeatureManagement().get("Alpha"); assertEquals(alphaFeatureItem.getKey(), "Alpha"); assertEquals(alphaFeatureItem.getEnabledFor().size(), 1); assertEquals(alphaFeatureItem.getEnabledFor().get(0).getName(), "Microsoft.Random"); final Feature betaFeatureItem = serverSideProperties.getFeatureManagement().get("Beta"); assertEquals(betaFeatureItem.getKey(), "Beta"); assertEquals(betaFeatureItem.getEnabledFor().size(), 1); assertEquals(betaFeatureItem.getEnabledFor().get(0).getName(), "Microsoft.TimeWindowFilter"); } }
class ServerSideFeatureManagementPropertiesTest { @Autowired private FeatureManagementProperties serverSideProperties; @Test @Test void featureManagementTest() { final Feature alphaFeatureItem = serverSideProperties.getFeatureManagement().get("Alpha"); assertEquals(alphaFeatureItem.getKey(), "Alpha"); assertEquals(alphaFeatureItem.getEnabledFor().size(), 1); assertEquals(alphaFeatureItem.getEnabledFor().get(0).getName(), "Microsoft.Random"); final Feature betaFeatureItem = serverSideProperties.getFeatureManagement().get("Beta"); assertEquals(betaFeatureItem.getKey(), "Beta"); assertEquals(betaFeatureItem.getEnabledFor().size(), 1); assertEquals(betaFeatureItem.getEnabledFor().get(0).getName(), "Microsoft.TimeWindowFilter"); final Feature deltaFeatureItem = serverSideProperties.getFeatureManagement().get("Delta"); assertEquals(deltaFeatureItem.getKey(), "Delta"); assertEquals(deltaFeatureItem.getEnabledFor().size(), 0); } }
This should support yaml. i.e. feature-flags and FeatureFlags should work.
private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if ("FeatureFlags".equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } }
if ("FeatureFlags".equalsIgnoreCase(key)) {
private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if (FEATURE_FLAG_PASCAL_CASE.equalsIgnoreCase(key) || FEATURE_FLAG_KEBAB_CASE.equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null && serverSideFeature.getConditions().getClientFilters().size() > 0) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } } /** * @return the 
featureManagement */ public Map<String, Feature> getFeatureManagement() { return featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; private static final String FEATURE_FLAG_PASCAL_CASE = "FeatureFlags"; private static final String FEATURE_FLAG_KEBAB_CASE = "feature-flags"; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } } /** * @return the featureManagement */ public Map<String, Feature> getFeatureManagement() { return 
featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
`serverSideFeature.getConditions().getClientFilters().size() > 0` is allowed to be 0. This should result in a feature flag returning false when enabled, as no feature filter returned true. This is how the current library works.
private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null && serverSideFeature.getConditions().getClientFilters().size() > 0) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } }
if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null
private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if ("FeatureFlags".equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } /** * @return the featureManagement */ public Map<String, Feature> getFeatureManagement() { return featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; private static final String FEATURE_FLAG_PASCAL_CASE = "FeatureFlags"; private static final String FEATURE_FLAG_KEBAB_CASE = "feature-flags"; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if (FEATURE_FLAG_PASCAL_CASE.equalsIgnoreCase(key) || FEATURE_FLAG_KEBAB_CASE.equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } } private void tryClientSideSchema(Map<? extends String, ? 
extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } /** * @return the featureManagement */ public Map<String, Feature> getFeatureManagement() { return featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
Updated
private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if ("FeatureFlags".equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } }
if ("FeatureFlags".equalsIgnoreCase(key)) {
private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if (FEATURE_FLAG_PASCAL_CASE.equalsIgnoreCase(key) || FEATURE_FLAG_KEBAB_CASE.equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null && serverSideFeature.getConditions().getClientFilters().size() > 0) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } } /** * @return the 
featureManagement */ public Map<String, Feature> getFeatureManagement() { return featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; private static final String FEATURE_FLAG_PASCAL_CASE = "FeatureFlags"; private static final String FEATURE_FLAG_KEBAB_CASE = "feature-flags"; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } } /** * @return the featureManagement */ public Map<String, Feature> getFeatureManagement() { return 
featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
Updated.
private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null && serverSideFeature.getConditions().getClientFilters().size() > 0) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } }
if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null
private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if ("FeatureFlags".equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } /** * @return the featureManagement */ public Map<String, Feature> getFeatureManagement() { return featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; private static final String FEATURE_FLAG_PASCAL_CASE = "FeatureFlags"; private static final String FEATURE_FLAG_KEBAB_CASE = "feature-flags"; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if (FEATURE_FLAG_PASCAL_CASE.equalsIgnoreCase(key) || FEATURE_FLAG_KEBAB_CASE.equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } } private void tryClientSideSchema(Map<? extends String, ? 
extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } /** * @return the featureManagement */ public Map<String, Feature> getFeatureManagement() { return featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
Can we make these constants
private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if ("FeatureFlags".equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } }
if ("FeatureFlags".equalsIgnoreCase(key)) {
private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if (FEATURE_FLAG_PASCAL_CASE.equalsIgnoreCase(key) || FEATURE_FLAG_KEBAB_CASE.equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null && serverSideFeature.getConditions().getClientFilters().size() > 0) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } } /** * @return the 
featureManagement */ public Map<String, Feature> getFeatureManagement() { return featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; private static final String FEATURE_FLAG_PASCAL_CASE = "FeatureFlags"; private static final String FEATURE_FLAG_KEBAB_CASE = "feature-flags"; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } } /** * @return the featureManagement */ public Map<String, Feature> getFeatureManagement() { return 
featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
Of course.
private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if ("FeatureFlags".equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } }
if ("FeatureFlags".equalsIgnoreCase(key)) {
private void tryServerSideSchema(Map<? extends String, ? extends Object> features) { if (features.keySet().isEmpty()) { return; } String featureFlagsSectionKey = ""; for (String key : features.keySet()) { if (FEATURE_FLAG_PASCAL_CASE.equalsIgnoreCase(key) || FEATURE_FLAG_KEBAB_CASE.equalsIgnoreCase(key)) { featureFlagsSectionKey = key; break; } } if (featureFlagsSectionKey.isEmpty()) { return; } final Object featureFlagsObject = features.get(featureFlagsSectionKey); if (Map.class.isAssignableFrom(featureFlagsObject.getClass())) { final Map<String, Object> featureFlagsSection = (Map<String, Object>) featureFlagsObject; for (String key : featureFlagsSection.keySet()) { addServerSideFeature(featureFlagsSection, key); } } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null && serverSideFeature.getConditions().getClientFilters().size() > 0) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } } /** * @return the 
featureManagement */ public Map<String, Feature> getFeatureManagement() { return featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
class FeatureManagementProperties extends HashMap<String, Object> { private static final Logger LOGGER = LoggerFactory.getLogger(FeatureManagementProperties.class); private static final ObjectMapper MAPPER = new ObjectMapper() .setPropertyNamingStrategy(PropertyNamingStrategies.KEBAB_CASE); private static final long serialVersionUID = -1642032123104805346L; private static final String FEATURE_FLAG_PASCAL_CASE = "FeatureFlags"; private static final String FEATURE_FLAG_KEBAB_CASE = "feature-flags"; /** * Map of all Feature Flags that use Feature Filters. */ private transient Map<String, Feature> featureManagement; /** * Map of all Feature Flags that are just enabled/disabled. */ private Map<String, Boolean> onOff; public FeatureManagementProperties() { featureManagement = new HashMap<>(); onOff = new HashMap<>(); } @Override public void putAll(Map<? extends String, ? extends Object> m) { if (m == null) { return; } featureManagement = new HashMap<>(); onOff = new HashMap<>(); tryServerSideSchema(m); if (featureManagement.isEmpty() && onOff.isEmpty()) { tryClientSideSchema(m); } } private void tryClientSideSchema(Map<? extends String, ? extends Object> features) { for (String key : features.keySet()) { addFeature(features, key, ""); } } @SuppressWarnings("unchecked") private void addFeature(Map<? extends String, ? 
extends Object> features, String key, String combined) { Object featureValue = features.get(key); if (!combined.isEmpty() && !combined.endsWith(".")) { combined += "."; } if (featureValue instanceof Boolean) { onOff.put(combined + key, (Boolean) featureValue); } else { Feature feature = null; try { feature = MAPPER.convertValue(featureValue, Feature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", combined + key, featureValue.toString()); } if (feature != null && feature.getEnabledFor() == null && feature.getKey() == null) { if (Map.class.isAssignableFrom(featureValue.getClass())) { features = (Map<String, Object>) featureValue; for (String fKey : features.keySet()) { addFeature(features, fKey, combined + key); } } } else { if (feature != null) { feature.setKey(key); featureManagement.put(key, feature); } } } } private void addServerSideFeature(Map<? extends String, ? extends Object> features, String key) { final Object featureValue = features.get(key); ServerSideFeature serverSideFeature = null; try { serverSideFeature = MAPPER.convertValue(featureValue, ServerSideFeature.class); } catch (IllegalArgumentException e) { LOGGER.error("Found invalid feature {} with value {}.", key, featureValue.toString()); } if (serverSideFeature != null && serverSideFeature.getId() != null) { if (serverSideFeature.getConditions() != null && serverSideFeature.getConditions().getClientFilters() != null) { final Feature feature = new Feature(); feature.setKey(serverSideFeature.getId()); feature.setEvaluate(serverSideFeature.isEnabled()); feature.setEnabledFor(serverSideFeature.getConditions().getClientFilters()); feature.setRequirementType(serverSideFeature.getConditions().getRequirementType()); featureManagement.put(serverSideFeature.getId(), feature); } else { onOff.put(serverSideFeature.getId(), serverSideFeature.isEnabled()); } } } /** * @return the featureManagement */ public Map<String, Feature> getFeatureManagement() { return 
featureManagement; } /** * @return the onOff */ public Map<String, Boolean> getOnOff() { return onOff; } }
```suggestion if (System.getProperty("org.graalvm.nativeimage.imagecode") != null) { ```
private Set<Feature> initStatsbeatFeatures() { if (isGraalVmNativeExecution()) { return Collections.singleton(Feature.GRAAL_VM_NATIVE); } return Collections.emptySet(); }
if (isGraalVmNativeExecution()) {
private Set<Feature> initStatsbeatFeatures() { if (System.getProperty("org.graalvm.nativeimage.imagecode") != null) { return Collections.singleton(Feature.GRAAL_VM_NATIVE); } return Collections.emptySet(); }
class AzureMonitorExporterBuilder { private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class); private static final String APPLICATIONINSIGHTS_CONNECTION_STRING = "APPLICATIONINSIGHTS_CONNECTION_STRING"; private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE = "https: private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME"; private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME"; private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties"); private ConnectionString connectionString; private TokenCredential credential; @SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"}) private AzureMonitorExporterServiceVersion serviceVersion; private HttpPipeline httpPipeline; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>(); private ClientOptions clientOptions; private boolean frozen; private TelemetryItemExporter builtTelemetryItemExporter; private StatsbeatModule statsbeatModule; /** * Creates an instance of {@link AzureMonitorExporterBuilder}. */ public AzureMonitorExporterBuilder() { } /** * Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other * settings are ignored. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving * responses. * @return The updated {@link AzureMonitorExporterBuilder} object. 
*/ public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "httpPipeline cannot be changed after any of the build methods have been called")); } this.httpPipeline = httpPipeline; return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param httpClient The HTTP client to use for requests. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "httpClient cannot be changed after any of the build methods have been called")); } this.httpClient = httpClient; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param httpLogOptions The logging configuration to use when sending and receiving HTTP * requests/responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "httpLogOptions cannot be changed after any of the build methods have been called")); } this.httpLogOptions = httpLogOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param httpPipelinePolicy a policy to be added to the http pipeline. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. 
*/ public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "httpPipelinePolicies cannot be added after any of the build methods have been called")); } httpPipelinePolicies.add( Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null.")); return this; } /** * Sets the client options such as application ID and custom headers to set on a request. * * @param clientOptions The client options. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "clientOptions cannot be changed after any of the build methods have been called")); } this.clientOptions = clientOptions; return this; } /** * Sets the connection string to use for exporting telemetry events to Azure Monitor. * * @param connectionString The connection string for the Azure Monitor resource. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If the connection string is {@code null}. * @throws IllegalArgumentException If the connection string is invalid. */ public AzureMonitorExporterBuilder connectionString(String connectionString) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "connectionString cannot be changed after any of the build methods have been called")); } this.connectionString = ConnectionString.parse(connectionString); return this; } /** * Sets the Azure Monitor service version. * * @param serviceVersion The Azure Monitor service version. * @return The update {@link AzureMonitorExporterBuilder} object. 
*/ public AzureMonitorExporterBuilder serviceVersion( AzureMonitorExporterServiceVersion serviceVersion) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "serviceVersion cannot be changed after any of the build methods have been called")); } this.serviceVersion = serviceVersion; return this; } /** * Sets the token credential required for authentication with the ingestion endpoint service. * * @param credential The Azure Identity TokenCredential. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder credential(TokenCredential credential) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "credential cannot be changed after any of the build methods have been called")); } this.credential = credential; return this; } /** * Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link SpanExporter}. * * @return An instance of {@link AzureMonitorTraceExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. */ public SpanExporter buildTraceExporter() { ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap()); internalBuildAndFreeze(defaultConfig); return buildTraceExporter(defaultConfig); } /** * Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link MetricExporter}. * * <p>When a new {@link MetricExporter} is created, it will automatically start {@link * HeartbeatExporter}. * * @return An instance of {@link AzureMonitorMetricExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. 
*/ public MetricExporter buildMetricExporter() { ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap()); internalBuildAndFreeze(defaultConfig); return buildMetricExporter(defaultConfig); } /** * Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link LogRecordExporter}. * * @return An instance of {@link AzureMonitorLogRecordExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. */ public LogRecordExporter buildLogRecordExporter() { ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap()); internalBuildAndFreeze(defaultConfig); return buildLogRecordExporter(defaultConfig); } /** * Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder. * * @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporters. 
*/ public void install(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) { sdkBuilder.addPropertiesSupplier(() -> { Map<String, String> props = new HashMap<>(); props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME); props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME); props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME); props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true"); return props; }); sdkBuilder.addSpanExporterCustomizer( (spanExporter, configProperties) -> { if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) { internalBuildAndFreeze(configProperties); spanExporter = buildTraceExporter(configProperties); } return spanExporter; }); sdkBuilder.addMetricExporterCustomizer( (metricExporter, configProperties) -> { if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) { internalBuildAndFreeze(configProperties); metricExporter = buildMetricExporter(configProperties); } return metricExporter; }); sdkBuilder.addLogRecordExporterCustomizer( (logRecordExporter, configProperties) -> { if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) { internalBuildAndFreeze(configProperties); logRecordExporter = buildLogRecordExporter(configProperties); } return logRecordExporter; }); sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) -> sdkMeterProviderBuilder.registerView( InstrumentSelector.builder() .setMeterName("io.opentelemetry.sdk.trace") .build(), View.builder() .setAggregation(Aggregation.drop()) .build() ).registerView( InstrumentSelector.builder() .setMeterName("io.opentelemetry.sdk.logs") .build(), View.builder() .setAggregation(Aggregation.drop()) .build() )); } private void internalBuildAndFreeze(ConfigProperties configProperties) { if (!frozen) { HttpPipeline httpPipeline = createHttpPipeline(); statsbeatModule = 
initStatsbeatModule(configProperties); File tempDir = TempDirs.getApplicationInsightsTempDir( LOGGER, "Telemetry will not be stored to disk and retried on sporadic network failures"); builtTelemetryItemExporter = AzureMonitorHelper.createTelemetryItemExporter(httpPipeline, statsbeatModule, tempDir, LocalStorageStats.noop()); startStatsbeatModule(statsbeatModule, configProperties, tempDir); frozen = true; } } private SpanExporter buildTraceExporter(ConfigProperties configProperties) { return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter, statsbeatModule); } private MetricExporter buildMetricExporter(ConfigProperties configProperties) { HeartbeatExporter.start( MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send); return new AzureMonitorMetricExporter( new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter); } private static boolean isGraalVmNativeExecution() { String imageCode = System.getProperty("org.graalvm.nativeimage.imagecode"); return imageCode != null; } private StatsbeatConnectionString getStatsbeatConnectionString() { return StatsbeatConnectionString.create(connectionString, null, null); } private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) { return new AzureMonitorLogRecordExporter( new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter); } private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) { return new SpanDataMapper( true, createDefaultsPopulator(configProperties), (event, instrumentationName) -> false, (span, event) -> false); } private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) { ConnectionString connectionString = getConnectionString(configProperties); return (builder, resource) -> { builder.setConnectionString(connectionString); 
builder.setResource(resource); builder.addTag( ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion()); ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties); }; } private ConnectionString getConnectionString(ConfigProperties configProperties) { if (connectionString != null) { return connectionString; } ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING)); return Objects.requireNonNull(connectionString, "'connectionString' cannot be null"); } private HttpPipeline createHttpPipeline() { if (httpPipeline != null) { if (credential != null) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'credential' is not supported when custom 'httpPipeline' is specified")); } if (httpClient != null) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'httpClient' is not supported when custom 'httpPipeline' is specified")); } if (httpLogOptions != null) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'httpLogOptions' is not supported when custom 'httpPipeline' is specified")); } if (!httpPipelinePolicies.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified")); } if (clientOptions != null) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'clientOptions' is not supported when custom 'httpPipeline' is specified")); } return httpPipeline; } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault("name", "UnknownName"); String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration())); policies.add(new CookiePolicy()); if (credential != null) { policies.add(new 
BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE)); } policies.addAll(httpPipelinePolicies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new com.azure.core.http.HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .tracer(new NoopTracer()) .build(); } private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) { return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration); } private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties, File tempDir) { HttpPipeline statsbeatHttpPipeline = createStatsbeatHttpPipeline(); TelemetryItemExporter statsbeatTelemetryItemExporter = AzureMonitorHelper.createStatsbeatTelemetryItemExporter(statsbeatHttpPipeline, statsbeatModule, tempDir); statsbeatModule.start( statsbeatTelemetryItemExporter, this::getStatsbeatConnectionString, getConnectionString(configProperties)::getInstrumentationKey, false, configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)), configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)), false, initStatsbeatFeatures()); } private HttpPipeline createStatsbeatHttpPipeline() { if (httpPipeline != null) { return httpPipeline; } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault("name", "UnknownName"); String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration())); policies.add(new CookiePolicy()); policies.addAll(httpPipelinePolicies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new com.azure.core.http.HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) 
.tracer(new NoopTracer()) .build(); } }
class AzureMonitorExporterBuilder { private static final ClientLogger LOGGER = new ClientLogger(AzureMonitorExporterBuilder.class); private static final String APPLICATIONINSIGHTS_CONNECTION_STRING = "APPLICATIONINSIGHTS_CONNECTION_STRING"; private static final String APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE = "https: private static final String STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME"; private static final String STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME = "STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME"; private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-monitor-opentelemetry-exporter.properties"); private ConnectionString connectionString; private TokenCredential credential; @SuppressWarnings({"UnusedVariable", "FieldCanBeLocal"}) private AzureMonitorExporterServiceVersion serviceVersion; private HttpPipeline httpPipeline; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private final List<HttpPipelinePolicy> httpPipelinePolicies = new ArrayList<>(); private ClientOptions clientOptions; private boolean frozen; private TelemetryItemExporter builtTelemetryItemExporter; private StatsbeatModule statsbeatModule; /** * Creates an instance of {@link AzureMonitorExporterBuilder}. */ public AzureMonitorExporterBuilder() { } /** * Sets the HTTP pipeline to use for the service client. If {@code httpPipeline} is set, all other * settings are ignored. * * @param httpPipeline The HTTP pipeline to use for sending service requests and receiving * responses. * @return The updated {@link AzureMonitorExporterBuilder} object. 
*/ public AzureMonitorExporterBuilder httpPipeline(HttpPipeline httpPipeline) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "httpPipeline cannot be changed after any of the build methods have been called")); } this.httpPipeline = httpPipeline; return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param httpClient The HTTP client to use for requests. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpClient(HttpClient httpClient) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "httpClient cannot be changed after any of the build methods have been called")); } this.httpClient = httpClient; return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p>If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param httpLogOptions The logging configuration to use when sending and receiving HTTP * requests/responses. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder httpLogOptions(HttpLogOptions httpLogOptions) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "httpLogOptions cannot be changed after any of the build methods have been called")); } this.httpLogOptions = httpLogOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param httpPipelinePolicy a policy to be added to the http pipeline. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If {@code policy} is {@code null}. 
*/ public AzureMonitorExporterBuilder addHttpPipelinePolicy(HttpPipelinePolicy httpPipelinePolicy) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "httpPipelinePolicies cannot be added after any of the build methods have been called")); } httpPipelinePolicies.add( Objects.requireNonNull(httpPipelinePolicy, "'policy' cannot be null.")); return this; } /** * Sets the client options such as application ID and custom headers to set on a request. * * @param clientOptions The client options. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder clientOptions(ClientOptions clientOptions) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "clientOptions cannot be changed after any of the build methods have been called")); } this.clientOptions = clientOptions; return this; } /** * Sets the connection string to use for exporting telemetry events to Azure Monitor. * * @param connectionString The connection string for the Azure Monitor resource. * @return The updated {@link AzureMonitorExporterBuilder} object. * @throws NullPointerException If the connection string is {@code null}. * @throws IllegalArgumentException If the connection string is invalid. */ public AzureMonitorExporterBuilder connectionString(String connectionString) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "connectionString cannot be changed after any of the build methods have been called")); } this.connectionString = ConnectionString.parse(connectionString); return this; } /** * Sets the Azure Monitor service version. * * @param serviceVersion The Azure Monitor service version. * @return The update {@link AzureMonitorExporterBuilder} object. 
*/ public AzureMonitorExporterBuilder serviceVersion( AzureMonitorExporterServiceVersion serviceVersion) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "serviceVersion cannot be changed after any of the build methods have been called")); } this.serviceVersion = serviceVersion; return this; } /** * Sets the token credential required for authentication with the ingestion endpoint service. * * @param credential The Azure Identity TokenCredential. * @return The updated {@link AzureMonitorExporterBuilder} object. */ public AzureMonitorExporterBuilder credential(TokenCredential credential) { if (frozen) { throw LOGGER.logExceptionAsError(new IllegalStateException( "credential cannot be changed after any of the build methods have been called")); } this.credential = credential; return this; } /** * Creates an {@link AzureMonitorTraceExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link SpanExporter}. * * @return An instance of {@link AzureMonitorTraceExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. */ public SpanExporter buildTraceExporter() { ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap()); internalBuildAndFreeze(defaultConfig); return buildTraceExporter(defaultConfig); } /** * Creates an {@link AzureMonitorMetricExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link MetricExporter}. * * <p>When a new {@link MetricExporter} is created, it will automatically start {@link * HeartbeatExporter}. * * @return An instance of {@link AzureMonitorMetricExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. 
*/ public MetricExporter buildMetricExporter() { ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap()); internalBuildAndFreeze(defaultConfig); return buildMetricExporter(defaultConfig); } /** * Creates an {@link AzureMonitorLogRecordExporter} based on the options set in the builder. This * exporter is an implementation of OpenTelemetry {@link LogRecordExporter}. * * @return An instance of {@link AzureMonitorLogRecordExporter}. * @throws NullPointerException if the connection string is not set on this builder or if the * environment variable "APPLICATIONINSIGHTS_CONNECTION_STRING" is not set. */ public LogRecordExporter buildLogRecordExporter() { ConfigProperties defaultConfig = DefaultConfigProperties.create(Collections.emptyMap()); internalBuildAndFreeze(defaultConfig); return buildLogRecordExporter(defaultConfig); } /** * Configures an {@link AutoConfiguredOpenTelemetrySdkBuilder} based on the options set in the builder. * * @param sdkBuilder the {@link AutoConfiguredOpenTelemetrySdkBuilder} in which to install the azure monitor exporters. 
*/ public void install(AutoConfiguredOpenTelemetrySdkBuilder sdkBuilder) { sdkBuilder.addPropertiesSupplier(() -> { Map<String, String> props = new HashMap<>(); props.put("otel.traces.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME); props.put("otel.metrics.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME); props.put("otel.logs.exporter", AzureMonitorExporterProviderKeys.EXPORTER_NAME); props.put(AzureMonitorExporterProviderKeys.INTERNAL_USING_AZURE_MONITOR_EXPORTER_BUILDER, "true"); return props; }); sdkBuilder.addSpanExporterCustomizer( (spanExporter, configProperties) -> { if (spanExporter instanceof AzureMonitorSpanExporterProvider.MarkerSpanExporter) { internalBuildAndFreeze(configProperties); spanExporter = buildTraceExporter(configProperties); } return spanExporter; }); sdkBuilder.addMetricExporterCustomizer( (metricExporter, configProperties) -> { if (metricExporter instanceof AzureMonitorMetricExporterProvider.MarkerMetricExporter) { internalBuildAndFreeze(configProperties); metricExporter = buildMetricExporter(configProperties); } return metricExporter; }); sdkBuilder.addLogRecordExporterCustomizer( (logRecordExporter, configProperties) -> { if (logRecordExporter instanceof AzureMonitorLogRecordExporterProvider.MarkerLogRecordExporter) { internalBuildAndFreeze(configProperties); logRecordExporter = buildLogRecordExporter(configProperties); } return logRecordExporter; }); sdkBuilder.addMeterProviderCustomizer((sdkMeterProviderBuilder, config) -> sdkMeterProviderBuilder.registerView( InstrumentSelector.builder() .setMeterName("io.opentelemetry.sdk.trace") .build(), View.builder() .setAggregation(Aggregation.drop()) .build() ).registerView( InstrumentSelector.builder() .setMeterName("io.opentelemetry.sdk.logs") .build(), View.builder() .setAggregation(Aggregation.drop()) .build() )); } private void internalBuildAndFreeze(ConfigProperties configProperties) { if (!frozen) { HttpPipeline httpPipeline = createHttpPipeline(); statsbeatModule = 
initStatsbeatModule(configProperties); File tempDir = TempDirs.getApplicationInsightsTempDir( LOGGER, "Telemetry will not be stored to disk and retried on sporadic network failures"); builtTelemetryItemExporter = AzureMonitorHelper.createTelemetryItemExporter(httpPipeline, statsbeatModule, tempDir, LocalStorageStats.noop()); startStatsbeatModule(statsbeatModule, configProperties, tempDir); frozen = true; } } private SpanExporter buildTraceExporter(ConfigProperties configProperties) { return new AzureMonitorTraceExporter(createSpanDataMapper(configProperties), builtTelemetryItemExporter, statsbeatModule); } private MetricExporter buildMetricExporter(ConfigProperties configProperties) { HeartbeatExporter.start( MINUTES.toSeconds(15), createDefaultsPopulator(configProperties), builtTelemetryItemExporter::send); return new AzureMonitorMetricExporter( new MetricDataMapper(createDefaultsPopulator(configProperties), true), builtTelemetryItemExporter); } private StatsbeatConnectionString getStatsbeatConnectionString() { return StatsbeatConnectionString.create(connectionString, null, null); } private LogRecordExporter buildLogRecordExporter(ConfigProperties configProperties) { return new AzureMonitorLogRecordExporter( new LogDataMapper(true, false, createDefaultsPopulator(configProperties)), builtTelemetryItemExporter); } private SpanDataMapper createSpanDataMapper(ConfigProperties configProperties) { return new SpanDataMapper( true, createDefaultsPopulator(configProperties), (event, instrumentationName) -> false, (span, event) -> false); } private BiConsumer<AbstractTelemetryBuilder, Resource> createDefaultsPopulator(ConfigProperties configProperties) { ConnectionString connectionString = getConnectionString(configProperties); return (builder, resource) -> { builder.setConnectionString(connectionString); builder.setResource(resource); builder.addTag( ContextTagKeys.AI_INTERNAL_SDK_VERSION.toString(), VersionGenerator.getSdkVersion()); 
ResourceParser.updateRoleNameAndInstance(builder, resource, configProperties); }; } private ConnectionString getConnectionString(ConfigProperties configProperties) { if (connectionString != null) { return connectionString; } ConnectionString connectionString = ConnectionString.parse(configProperties.getString(APPLICATIONINSIGHTS_CONNECTION_STRING)); return Objects.requireNonNull(connectionString, "'connectionString' cannot be null"); } private HttpPipeline createHttpPipeline() { if (httpPipeline != null) { if (credential != null) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'credential' is not supported when custom 'httpPipeline' is specified")); } if (httpClient != null) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'httpClient' is not supported when custom 'httpPipeline' is specified")); } if (httpLogOptions != null) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'httpLogOptions' is not supported when custom 'httpPipeline' is specified")); } if (!httpPipelinePolicies.isEmpty()) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'httpPipelinePolicies' is not supported when custom 'httpPipeline' is specified")); } if (clientOptions != null) { throw LOGGER.logExceptionAsError(new IllegalStateException( "'clientOptions' is not supported when custom 'httpPipeline' is specified")); } return httpPipeline; } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault("name", "UnknownName"); String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration())); policies.add(new CookiePolicy()); if (credential != null) { policies.add(new BearerTokenAuthenticationPolicy(credential, APPLICATIONINSIGHTS_AUTHENTICATION_SCOPE)); } policies.addAll(httpPipelinePolicies); 
policies.add(new HttpLoggingPolicy(httpLogOptions)); return new com.azure.core.http.HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .tracer(new NoopTracer()) .build(); } private StatsbeatModule initStatsbeatModule(ConfigProperties configProperties) { return new StatsbeatModule(PropertyHelper::lazyUpdateVmRpIntegration); } private void startStatsbeatModule(StatsbeatModule statsbeatModule, ConfigProperties configProperties, File tempDir) { HttpPipeline statsbeatHttpPipeline = createStatsbeatHttpPipeline(); TelemetryItemExporter statsbeatTelemetryItemExporter = AzureMonitorHelper.createStatsbeatTelemetryItemExporter(statsbeatHttpPipeline, statsbeatModule, tempDir); statsbeatModule.start( statsbeatTelemetryItemExporter, this::getStatsbeatConnectionString, getConnectionString(configProperties)::getInstrumentationKey, false, configProperties.getLong(STATSBEAT_SHORT_INTERVAL_SECONDS_PROPERTY_NAME, MINUTES.toSeconds(15)), configProperties.getLong(STATSBEAT_LONG_INTERVAL_SECONDS_PROPERTY_NAME, DAYS.toSeconds(1)), false, initStatsbeatFeatures()); } private HttpPipeline createStatsbeatHttpPipeline() { if (httpPipeline != null) { return httpPipeline; } List<HttpPipelinePolicy> policies = new ArrayList<>(); String clientName = PROPERTIES.getOrDefault("name", "UnknownName"); String clientVersion = PROPERTIES.getOrDefault("version", "UnknownVersion"); String applicationId = CoreUtils.getApplicationId(clientOptions, httpLogOptions); policies.add(new UserAgentPolicy(applicationId, clientName, clientVersion, Configuration.getGlobalConfiguration())); policies.add(new CookiePolicy()); policies.addAll(httpPipelinePolicies); policies.add(new HttpLoggingPolicy(httpLogOptions)); return new com.azure.core.http.HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .tracer(new NoopTracer()) .build(); } }
Should we add a specific message for the `basic` tier? The current generic message tells the user that the basic tier is invalid, when it is actually a valid tier that is simply not supported by Service Bus JMS.
/**
 * Validates the {@code spring.jms.servicebus} properties after binding.
 *
 * <p>Requires a namespace when passwordless (Azure AD) connections are enabled,
 * or a connection string otherwise, plus a supported pricing tier
 * ({@code standard} or {@code premium}, case-insensitive).
 *
 * @throws IllegalArgumentException if a required property is missing or the
 *     pricing tier is unsupported
 */
public void afterPropertiesSet() throws Exception {
    if (isPasswordlessEnabled()) {
        // Passwordless auth connects by namespace rather than connection string.
        if (!StringUtils.hasText(namespace)) {
            throw new IllegalArgumentException("Passwordless connections enabled, 'spring.jms.servicebus.namespace' should be provided.");
        }
    } else {
        if (!StringUtils.hasText(connectionString)) {
            throw new IllegalArgumentException("'spring.jms.servicebus.connection-string' should be provided.");
        }
    }
    if (null == pricingTier || !pricingTier.matches("(?i)premium|standard")) {
        // "basic" is a valid Service Bus tier that simply does not support JMS,
        // so give it a dedicated, actionable message instead of the generic one.
        String errMessage;
        if ("basic".equalsIgnoreCase(pricingTier)) {
            errMessage = "The basic tier is not supported by Service Bus JMS. Please use standard or premium tier instead.";
        } else {
            errMessage = "'spring.jms.servicebus.pricing-tier' is not valid.";
        }
        throw new IllegalArgumentException(errMessage);
    }
}
throw new IllegalArgumentException("'spring.jms.servicebus.pricing-tier' is not valid");
/**
 * Validates the {@code spring.jms.servicebus} properties after binding.
 *
 * <p>Requires a namespace when passwordless (Azure AD) connections are enabled,
 * or a connection string otherwise, plus a supported pricing tier
 * ({@code standard} or {@code premium}, case-insensitive).
 *
 * @throws IllegalArgumentException if a required property is missing or the
 *     pricing tier is unsupported
 */
public void afterPropertiesSet() throws Exception {
    if (isPasswordlessEnabled()) {
        // Passwordless auth connects by namespace rather than connection string.
        if (!StringUtils.hasText(namespace)) {
            throw new IllegalArgumentException("Passwordless connections enabled, 'spring.jms.servicebus.namespace' should be provided.");
        }
    } else {
        if (!StringUtils.hasText(connectionString)) {
            throw new IllegalArgumentException("'spring.jms.servicebus.connection-string' should be provided.");
        }
    }
    if (null == pricingTier || !pricingTier.matches("(?i)premium|standard")) {
        // "basic" is a valid tier that does not support JMS; call that out
        // explicitly. A conditional expression avoids the redundant null
        // initialization followed by an unconditional if/else assignment.
        String errMessage = "basic".equalsIgnoreCase(pricingTier)
            ? "The basic tier is not supported by Service Bus JMS. Please use standard or premium tier instead."
            : "'spring.jms.servicebus.pricing-tier' is not valid.";
        throw new IllegalArgumentException(errMessage);
    }
}
class AzureServiceBusJmsProperties implements InitializingBean, PasswordlessProperties { /** * Service Bus JMS properties prefix. */ public static final String PREFIX = "spring.jms.servicebus"; private static final String SERVICE_BUS_SCOPE_AZURE = "https: private static final String SERVICE_BUS_SCOPE_AZURE_CHINA = SERVICE_BUS_SCOPE_AZURE; /** * @deprecated AZURE_GERMANY is deprecated. Please use other Service Bus scopes. */ @Deprecated private static final String SERVICE_BUS_SCOPE_AZURE_GERMANY = SERVICE_BUS_SCOPE_AZURE; private static final String SERVICE_BUS_SCOPE_AZURE_US_GOVERNMENT = SERVICE_BUS_SCOPE_AZURE; private static final Map<CloudType, String> SERVICEBUS_SCOPE_MAP = new HashMap<CloudType, String>() { { put(CloudType.AZURE, SERVICE_BUS_SCOPE_AZURE); put(CloudType.AZURE_CHINA, SERVICE_BUS_SCOPE_AZURE_CHINA); put(CloudType.AZURE_US_GOVERNMENT, SERVICE_BUS_SCOPE_AZURE_US_GOVERNMENT); } }; private AzureProfileConfigurationProperties profile = new AzureProfileConfigurationProperties(); /** * The scopes required for the access token. */ private String scopes; private TokenCredentialConfigurationProperties credential = new TokenCredentialConfigurationProperties(); /** * Whether to enable supporting azure identity token credentials. * * If the value is true, then 'spring.jms.servicebus.namespace' must be set. * If the passwordlessEnabled is true, it will try to authenticate connections with Azure AD. */ private boolean passwordlessEnabled = false; /** * Whether to enable Servive Bus JMS autoconfiguration. */ private boolean enabled = true; /** * The Service Bus namespace. */ private String namespace; /** * Connection string to connect to a Service Bus namespace. */ private String connectionString; /** * Service Bus topic client ID. Only works for the bean of topicJmsListenerContainerFactory. */ private String topicClientId; /** * Connection idle timeout duration that how long the client expects Service Bus to keep a connection alive when no messages delivered. 
* @see <a href="http: * @see <a href="https: */ private Duration idleTimeout = Duration.ofMinutes(2); /** * Pricing tier for a Service Bus namespace. */ private String pricingTier; private final Listener listener = new Listener(); private final PrefetchPolicy prefetchPolicy = new PrefetchPolicy(); @NestedConfigurationProperty private final JmsPoolConnectionFactoryProperties pool = new JmsPoolConnectionFactoryProperties(); /** * Whether to enable Service Bus JMS autoconfiguration. * @return Whether to enable Service Bus autoconfiguration */ public boolean isEnabled() { return enabled; } /** * Set whether to enable Service Bus JMS autoconfiguation. * @param enabled whether to enable Service Bus autoconfiguration. */ public void setEnabled(boolean enabled) { this.enabled = enabled; } /** * The properties for a pooled connection factory. * @return the properties for a pooled connection factory. */ public JmsPoolConnectionFactoryProperties getPool() { return pool; } /** * Get the connection string to connect to a Service Bus namespace. * @return the connection string to connect to a Service Bus namespace. */ public String getConnectionString() { return connectionString; } /** * Set the connection string to connect to a Service Bus namespace. * @param connectionString the connection string to connect to a Service Bus namespace. */ public void setConnectionString(String connectionString) { this.connectionString = connectionString; } /** * Get the Service Bus topic client ID. * @return the Service Bus topic client ID. */ public String getTopicClientId() { return topicClientId; } /** * Set the Service Bus topic client ID. * @param topicClientId the Service Bus topic client ID. */ public void setTopicClientId(String topicClientId) { this.topicClientId = topicClientId; } /** * Get the pricing tier for a Service Bus namespace. * @return the pricing tier for a Service Bus namespace. 
*/ public String getPricingTier() { return this.pricingTier; } /** * Set the pricing tier for a Service Bus namespace. * @param pricingTier the pricing tier for a Service Bus namespace. */ public void setPricingTier(String pricingTier) { this.pricingTier = pricingTier; } /** * Get the connection idle timeout duration. * @return the connection idle timeout duration. */ public Duration getIdleTimeout() { return idleTimeout; } /** * Set the connection idle timeout duration. * @param idleTimeout the connection idle timeout duration. */ public void setIdleTimeout(Duration idleTimeout) { this.idleTimeout = idleTimeout; } /** * Get the listener related properties. * @return the listener related properties. */ public Listener getListener() { return listener; } /** * Get the prefetch policy related properties. * @return the prefetch policy related properties. */ public PrefetchPolicy getPrefetchPolicy() { return prefetchPolicy; } /** * Get the Service Bus namespace. * @return the Service Bus namespace. */ public String getNamespace() { return namespace; } /** * Set the Service Bus namespace. * @param namespace the Service Bus namespace. */ public void setNamespace(String namespace) { this.namespace = namespace; } /** * Get the scopes required for the access token. * * @return scopes required for the access token */ @Override public String getScopes() { return this.scopes == null ? getDefaultScopes() : this.scopes; } /** * Set the scopes required for the access token. * * @param scopes the scopes required for the access token */ public void setScopes(String scopes) { this.scopes = scopes; } /** * Whether to enable connections authenticating with Azure AD, default is false. * * @return enable connections authenticating with Azure AD if true, otherwise false. */ @Override public boolean isPasswordlessEnabled() { return passwordlessEnabled; } /** * Set the value to enable/disable connections authenticating with Azure AD. * If not set, by default the value is false. 
* * @param passwordlessEnabled the passwordlessEnabled */ public void setPasswordlessEnabled(boolean passwordlessEnabled) { this.passwordlessEnabled = passwordlessEnabled; } /** * Get the profile * @return the profile */ @Override public AzureProfileConfigurationProperties getProfile() { return profile; } /** * Set the profile * @param profile the profile properties related to an Azure subscription */ public void setProfile(AzureProfileConfigurationProperties profile) { this.profile = profile; } /** * Get the credential properties. * * @return the credential properties. */ @Override public TokenCredentialConfigurationProperties getCredential() { return credential; } /** * Set the credential properties. * * @param credential the credential properties */ public void setCredential(TokenCredentialConfigurationProperties credential) { this.credential = credential; } /** * Validate spring.jms.servicebus related properties. * * @throws IllegalArgumentException If connectionString is empty. */ @Override /** * Properties to configure {@link org.apache.qpid.jms.policy.JmsDefaultPrefetchPolicy} for {@link * org.apache.qpid.jms.JmsConnectionFactory} . */ public static class PrefetchPolicy { /** * Fallback value for prefetch option in this Service Bus namespace. */ private int all = 0; /** * The number of prefetch for durable topic. */ private int durableTopicPrefetch = 0; /** * The number of prefetch for queue browser. */ private int queueBrowserPrefetch = 0; /** * The number of prefetch for queue. */ private int queuePrefetch = 0; /** * The number of prefetch for topic. */ private int topicPrefetch = 0; /** * Gets the all prefetch value. * * @return The all prefect value. */ public int getAll() { return Math.max(all, 0); } /** * Sets the all prefetch value. * * @param all The all prefetch value. */ public void setAll(int all) { this.all = all; } /** * Gets the durable topic prefetch value. * * @return The durable topic prefetch value. 
*/ public int getDurableTopicPrefetch() { return durableTopicPrefetch > 0 ? durableTopicPrefetch : getAll(); } /** * Sets the durable topic prefetch value. * * @param durableTopicPrefetch The durable topic prefetch value. */ public void setDurableTopicPrefetch(int durableTopicPrefetch) { this.durableTopicPrefetch = durableTopicPrefetch; } /** * Gets the queue browser prefetch value. * * @return The queue browser prefetch value. */ public int getQueueBrowserPrefetch() { return queueBrowserPrefetch > 0 ? queueBrowserPrefetch : getAll(); } /** * Sets the queue browser prefetch value. * * @param queueBrowserPrefetch The queue browser prefetch value. */ public void setQueueBrowserPrefetch(int queueBrowserPrefetch) { this.queueBrowserPrefetch = queueBrowserPrefetch; } /** * Gets the queue prefetch value. * * @return The queue prefetch value. */ public int getQueuePrefetch() { return queuePrefetch > 0 ? queuePrefetch : getAll(); } /** * Sets the queue prefetch value. * * @param queuePrefetch The queue prefetch value. */ public void setQueuePrefetch(int queuePrefetch) { this.queuePrefetch = queuePrefetch; } /** * Gets the topic prefetch value. * * @return The topic prefetch value. */ public int getTopicPrefetch() { return topicPrefetch > 0 ? topicPrefetch : getAll(); } /** * Sets the topic prefetch value. * * @param topicPrefetch The topic prefetch value. */ public void setTopicPrefetch(int topicPrefetch) { this.topicPrefetch = topicPrefetch; } } /** * Properties to configure {@link org.springframework.jms.annotation.JmsListener} for {@link * org.springframework.jms.config.AbstractJmsListenerContainerFactory}. */ public static class Listener { /** * Whether the reply destination type is topic. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean replyPubSubDomain; /** * The QosSettings to use when sending a reply. */ private QosSettings replyQosSettings; /** * Whether to make the subscription durable. 
Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionDurable = Boolean.TRUE; /** * Whether to make the subscription shared. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionShared; /** * The phase in which this container should be started and stopped. */ private Integer phase; /** * Whether reply destination type is topic. * * @return Whether reply destination type is topic. */ public Boolean isReplyPubSubDomain() { return replyPubSubDomain; } /** * Sets whether reply destination is topic. * * @param replyPubSubDomain Whether reply destination is topic. */ public void setReplyPubSubDomain(Boolean replyPubSubDomain) { this.replyPubSubDomain = replyPubSubDomain; } /** * Gets the reply QoS settings. * * @return The reply QoS settings. */ public QosSettings getReplyQosSettings() { return replyQosSettings; } /** * Sets the reply QoS settings. * * @param replyQosSettings The reply QoS settings. */ public void setReplyQosSettings(QosSettings replyQosSettings) { this.replyQosSettings = replyQosSettings; } /** * Whether the subscription is durable. * * @return Whether the subscription is durable. */ public Boolean isSubscriptionDurable() { return subscriptionDurable; } /** * Sets whether the subscription is durable. * * @param subscriptionDurable Whether the subscription is durable. */ public void setSubscriptionDurable(Boolean subscriptionDurable) { this.subscriptionDurable = subscriptionDurable; } /** * Whether the subscription is shared. * * @return Whether the subscription is shared. */ public Boolean isSubscriptionShared() { return subscriptionShared; } /** * Sets whether the subscription is shared. * * @param subscriptionShared Whether the subscription is shared. */ public void setSubscriptionShared(Boolean subscriptionShared) { this.subscriptionShared = subscriptionShared; } /** * Gets the phase. * * @return The phase. */ public Integer getPhase() { return phase; } /** * Sets the phase. 
* * @param phase The phase. */ public void setPhase(Integer phase) { this.phase = phase; } } private String getDefaultScopes() { return SERVICEBUS_SCOPE_MAP.getOrDefault(getProfile().getCloudType(), SERVICE_BUS_SCOPE_AZURE); } }
class AzureServiceBusJmsProperties implements InitializingBean, PasswordlessProperties { /** * Service Bus JMS properties prefix. */ public static final String PREFIX = "spring.jms.servicebus"; private static final String SERVICE_BUS_SCOPE_AZURE = "https: private static final String SERVICE_BUS_SCOPE_AZURE_CHINA = SERVICE_BUS_SCOPE_AZURE; /** * @deprecated AZURE_GERMANY is deprecated. Please use other Service Bus scopes. */ @Deprecated private static final String SERVICE_BUS_SCOPE_AZURE_GERMANY = SERVICE_BUS_SCOPE_AZURE; private static final String SERVICE_BUS_SCOPE_AZURE_US_GOVERNMENT = SERVICE_BUS_SCOPE_AZURE; private static final Map<CloudType, String> SERVICEBUS_SCOPE_MAP = new HashMap<CloudType, String>() { { put(CloudType.AZURE, SERVICE_BUS_SCOPE_AZURE); put(CloudType.AZURE_CHINA, SERVICE_BUS_SCOPE_AZURE_CHINA); put(CloudType.AZURE_US_GOVERNMENT, SERVICE_BUS_SCOPE_AZURE_US_GOVERNMENT); } }; private AzureProfileConfigurationProperties profile = new AzureProfileConfigurationProperties(); /** * The scopes required for the access token. */ private String scopes; private TokenCredentialConfigurationProperties credential = new TokenCredentialConfigurationProperties(); /** * Whether to enable supporting azure identity token credentials. * * If the value is true, then 'spring.jms.servicebus.namespace' must be set. * If the passwordlessEnabled is true, it will try to authenticate connections with Azure AD. */ private boolean passwordlessEnabled = false; /** * Whether to enable Servive Bus JMS autoconfiguration. */ private boolean enabled = true; /** * The Service Bus namespace. */ private String namespace; /** * Connection string to connect to a Service Bus namespace. */ private String connectionString; /** * Service Bus topic client ID. Only works for the bean of topicJmsListenerContainerFactory. */ private String topicClientId; /** * Connection idle timeout duration that how long the client expects Service Bus to keep a connection alive when no messages delivered. 
* @see <a href="http: * @see <a href="https: */ private Duration idleTimeout = Duration.ofMinutes(2); /** * Pricing tier for a Service Bus namespace. */ private String pricingTier; private final Listener listener = new Listener(); private final PrefetchPolicy prefetchPolicy = new PrefetchPolicy(); @NestedConfigurationProperty private final JmsPoolConnectionFactoryProperties pool = new JmsPoolConnectionFactoryProperties(); /** * Whether to enable Service Bus JMS autoconfiguration. * @return Whether to enable Service Bus autoconfiguration */ public boolean isEnabled() { return enabled; } /** * Set whether to enable Service Bus JMS autoconfiguation. * @param enabled whether to enable Service Bus autoconfiguration. */ public void setEnabled(boolean enabled) { this.enabled = enabled; } /** * The properties for a pooled connection factory. * @return the properties for a pooled connection factory. */ public JmsPoolConnectionFactoryProperties getPool() { return pool; } /** * Get the connection string to connect to a Service Bus namespace. * @return the connection string to connect to a Service Bus namespace. */ public String getConnectionString() { return connectionString; } /** * Set the connection string to connect to a Service Bus namespace. * @param connectionString the connection string to connect to a Service Bus namespace. */ public void setConnectionString(String connectionString) { this.connectionString = connectionString; } /** * Get the Service Bus topic client ID. * @return the Service Bus topic client ID. */ public String getTopicClientId() { return topicClientId; } /** * Set the Service Bus topic client ID. * @param topicClientId the Service Bus topic client ID. */ public void setTopicClientId(String topicClientId) { this.topicClientId = topicClientId; } /** * Get the pricing tier for a Service Bus namespace. * @return the pricing tier for a Service Bus namespace. 
*/ public String getPricingTier() { return this.pricingTier; } /** * Set the pricing tier for a Service Bus namespace. * @param pricingTier the pricing tier for a Service Bus namespace. */ public void setPricingTier(String pricingTier) { this.pricingTier = pricingTier; } /** * Get the connection idle timeout duration. * @return the connection idle timeout duration. */ public Duration getIdleTimeout() { return idleTimeout; } /** * Set the connection idle timeout duration. * @param idleTimeout the connection idle timeout duration. */ public void setIdleTimeout(Duration idleTimeout) { this.idleTimeout = idleTimeout; } /** * Get the listener related properties. * @return the listener related properties. */ public Listener getListener() { return listener; } /** * Get the prefetch policy related properties. * @return the prefetch policy related properties. */ public PrefetchPolicy getPrefetchPolicy() { return prefetchPolicy; } /** * Get the Service Bus namespace. * @return the Service Bus namespace. */ public String getNamespace() { return namespace; } /** * Set the Service Bus namespace. * @param namespace the Service Bus namespace. */ public void setNamespace(String namespace) { this.namespace = namespace; } /** * Get the scopes required for the access token. * * @return scopes required for the access token */ @Override public String getScopes() { return this.scopes == null ? getDefaultScopes() : this.scopes; } /** * Set the scopes required for the access token. * * @param scopes the scopes required for the access token */ public void setScopes(String scopes) { this.scopes = scopes; } /** * Whether to enable connections authenticating with Azure AD, default is false. * * @return enable connections authenticating with Azure AD if true, otherwise false. */ @Override public boolean isPasswordlessEnabled() { return passwordlessEnabled; } /** * Set the value to enable/disable connections authenticating with Azure AD. * If not set, by default the value is false. 
* * @param passwordlessEnabled the passwordlessEnabled */ public void setPasswordlessEnabled(boolean passwordlessEnabled) { this.passwordlessEnabled = passwordlessEnabled; } /** * Get the profile * @return the profile */ @Override public AzureProfileConfigurationProperties getProfile() { return profile; } /** * Set the profile * @param profile the profile properties related to an Azure subscription */ public void setProfile(AzureProfileConfigurationProperties profile) { this.profile = profile; } /** * Get the credential properties. * * @return the credential properties. */ @Override public TokenCredentialConfigurationProperties getCredential() { return credential; } /** * Set the credential properties. * * @param credential the credential properties */ public void setCredential(TokenCredentialConfigurationProperties credential) { this.credential = credential; } /** * Validate spring.jms.servicebus related properties. * * @throws IllegalArgumentException If connectionString is empty. */ @Override /** * Properties to configure {@link org.apache.qpid.jms.policy.JmsDefaultPrefetchPolicy} for {@link * org.apache.qpid.jms.JmsConnectionFactory} . */ public static class PrefetchPolicy { /** * Fallback value for prefetch option in this Service Bus namespace. */ private int all = 0; /** * The number of prefetch for durable topic. */ private int durableTopicPrefetch = 0; /** * The number of prefetch for queue browser. */ private int queueBrowserPrefetch = 0; /** * The number of prefetch for queue. */ private int queuePrefetch = 0; /** * The number of prefetch for topic. */ private int topicPrefetch = 0; /** * Gets the all prefetch value. * * @return The all prefect value. */ public int getAll() { return Math.max(all, 0); } /** * Sets the all prefetch value. * * @param all The all prefetch value. */ public void setAll(int all) { this.all = all; } /** * Gets the durable topic prefetch value. * * @return The durable topic prefetch value. 
*/ public int getDurableTopicPrefetch() { return durableTopicPrefetch > 0 ? durableTopicPrefetch : getAll(); } /** * Sets the durable topic prefetch value. * * @param durableTopicPrefetch The durable topic prefetch value. */ public void setDurableTopicPrefetch(int durableTopicPrefetch) { this.durableTopicPrefetch = durableTopicPrefetch; } /** * Gets the queue browser prefetch value. * * @return The queue browser prefetch value. */ public int getQueueBrowserPrefetch() { return queueBrowserPrefetch > 0 ? queueBrowserPrefetch : getAll(); } /** * Sets the queue browser prefetch value. * * @param queueBrowserPrefetch The queue browser prefetch value. */ public void setQueueBrowserPrefetch(int queueBrowserPrefetch) { this.queueBrowserPrefetch = queueBrowserPrefetch; } /** * Gets the queue prefetch value. * * @return The queue prefetch value. */ public int getQueuePrefetch() { return queuePrefetch > 0 ? queuePrefetch : getAll(); } /** * Sets the queue prefetch value. * * @param queuePrefetch The queue prefetch value. */ public void setQueuePrefetch(int queuePrefetch) { this.queuePrefetch = queuePrefetch; } /** * Gets the topic prefetch value. * * @return The topic prefetch value. */ public int getTopicPrefetch() { return topicPrefetch > 0 ? topicPrefetch : getAll(); } /** * Sets the topic prefetch value. * * @param topicPrefetch The topic prefetch value. */ public void setTopicPrefetch(int topicPrefetch) { this.topicPrefetch = topicPrefetch; } } /** * Properties to configure {@link org.springframework.jms.annotation.JmsListener} for {@link * org.springframework.jms.config.AbstractJmsListenerContainerFactory}. */ public static class Listener { /** * Whether the reply destination type is topic. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean replyPubSubDomain; /** * The QosSettings to use when sending a reply. */ private QosSettings replyQosSettings; /** * Whether to make the subscription durable. 
Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionDurable = Boolean.TRUE; /** * Whether to make the subscription shared. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionShared; /** * The phase in which this container should be started and stopped. */ private Integer phase; /** * Whether reply destination type is topic. * * @return Whether reply destination type is topic. */ public Boolean isReplyPubSubDomain() { return replyPubSubDomain; } /** * Sets whether reply destination is topic. * * @param replyPubSubDomain Whether reply destination is topic. */ public void setReplyPubSubDomain(Boolean replyPubSubDomain) { this.replyPubSubDomain = replyPubSubDomain; } /** * Gets the reply QoS settings. * * @return The reply QoS settings. */ public QosSettings getReplyQosSettings() { return replyQosSettings; } /** * Sets the reply QoS settings. * * @param replyQosSettings The reply QoS settings. */ public void setReplyQosSettings(QosSettings replyQosSettings) { this.replyQosSettings = replyQosSettings; } /** * Whether the subscription is durable. * * @return Whether the subscription is durable. */ public Boolean isSubscriptionDurable() { return subscriptionDurable; } /** * Sets whether the subscription is durable. * * @param subscriptionDurable Whether the subscription is durable. */ public void setSubscriptionDurable(Boolean subscriptionDurable) { this.subscriptionDurable = subscriptionDurable; } /** * Whether the subscription is shared. * * @return Whether the subscription is shared. */ public Boolean isSubscriptionShared() { return subscriptionShared; } /** * Sets whether the subscription is shared. * * @param subscriptionShared Whether the subscription is shared. */ public void setSubscriptionShared(Boolean subscriptionShared) { this.subscriptionShared = subscriptionShared; } /** * Gets the phase. * * @return The phase. */ public Integer getPhase() { return phase; } /** * Sets the phase. 
* * @param phase The phase. */ public void setPhase(Integer phase) { this.phase = phase; } } private String getDefaultScopes() { return SERVICEBUS_SCOPE_MAP.getOrDefault(getProfile().getCloudType(), SERVICE_BUS_SCOPE_AZURE); } }
This comment seems irrelevant to the change — should it be removed?
/**
 * Verifies that a managed data disk can be created with Hyper-V generation V1
 * and later updated to V2, and that the service reflects the change after a
 * refresh.
 */
public void canCreateAndUpdateManagedDiskWithHyperVGeneration() {
    Disk disk = computeManager
        .disks()
        .define(generateRandomResourceName("disk", 15))
        .withRegion(Region.US_EAST)
        .withNewResourceGroup(rgName)
        .withData()
        .withSizeInGB(1)
        .withSku(DiskSkuTypes.STANDARD_LRS)
        .withHyperVGeneration(HyperVGeneration.V1)
        .create();
    disk.refresh();
    // JUnit convention: expected value first, actual second, so failure
    // messages read correctly.
    Assertions.assertEquals(HyperVGeneration.V1, disk.hyperVGeneration());

    disk.update().withHyperVGeneration(HyperVGeneration.V2).apply();
    disk.refresh();
    Assertions.assertEquals(HyperVGeneration.V2, disk.hyperVGeneration());
}
public void canCreateAndUpdateManagedDiskWithHyperVGeneration() {
    // Create an empty 1 GB data disk pinned to Hyper-V generation V1.
    Disk disk =
        computeManager
            .disks()
            .define(generateRandomResourceName("disk", 15))
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withData()
            .withSizeInGB(1)
            .withSku(DiskSkuTypes.STANDARD_LRS)
            .withHyperVGeneration(HyperVGeneration.V1)
            .create();
    disk.refresh();
    // JUnit convention: expected value first, actual second — otherwise failure
    // messages report the values backwards.
    Assertions.assertEquals(HyperVGeneration.V1, disk.hyperVGeneration());

    // Update to generation V2 and verify the change round-trips through the service.
    disk.update().withHyperVGeneration(HyperVGeneration.V2).apply();
    disk.refresh();
    Assertions.assertEquals(HyperVGeneration.V2, disk.hyperVGeneration());
}
class ManagedDiskOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = null; private Region region = Region.US_WEST_CENTRAL; private Region region2 = Region.US_EAST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); if (rgName2 != null) { resourceManager.resourceGroups().beginDeleteByName(rgName2); } } @Test public void canOperateOnEmptyManagedDisk() { final String diskName = generateRandomResourceName("md-empty-", 20); final DiskSkuTypes updateTo = DiskSkuTypes.STANDARD_LRS; ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk disk = computeManager .disks() .define(diskName) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withSizeInGB(100) .withSku(DiskSkuTypes.STANDARD_LRS) .withTag("tkey1", "tval1") .create(); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.EMPTY); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 100); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.EMPTY); Assertions.assertNull(disk.source().sourceId()); disk = disk.update().withSku(updateTo).withSizeInGB(200).apply(); Assertions.assertEquals(disk.sku(), updateTo); Assertions.assertEquals(disk.sizeInGB(), 200); disk = computeManager.disks().getByResourceGroup(disk.resourceGroupName(), disk.name()); Assertions.assertNotNull(disk); PagedIterable<Disk> myDisks = 
computeManager.disks().listByResourceGroup(disk.resourceGroupName()); Assertions.assertNotNull(myDisks); Assertions.assertTrue(TestUtilities.getSize(myDisks) > 0); String sasUrl = disk.grantAccess(100); Assertions.assertTrue(sasUrl != null && sasUrl != ""); disk.revokeAccess(); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromDisk() { final String diskName1 = generateRandomResourceName("md-1", 20); final String diskName2 = generateRandomResourceName("md-2", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk emptyDisk = computeManager .disks() .define(diskName1) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withSizeInGB(100) .create(); Disk disk = computeManager .disks() .define(diskName2) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .fromDisk(emptyDisk) .withSizeInGB(200) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); disk = computeManager.disks().getById(disk.id()); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName2)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.COPY); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 200); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.COPIED_FROM_DISK); Assertions.assertTrue(disk.source().sourceId().equalsIgnoreCase(emptyDisk.id())); computeManager.disks().deleteById(emptyDisk.id()); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromUpload() { final String diskName = generateRandomResourceName("md-2", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk disk = 
computeManager .disks() .define(diskName) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withUploadSizeInMB(1000) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); disk = computeManager.disks().getById(disk.id()); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.UPLOAD); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 0); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.UNKNOWN); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromSnapshot() { final String emptyDiskName = generateRandomResourceName("md-empty-", 20); final String snapshotBasedDiskName = generateRandomResourceName("md-snp-", 20); final String snapshotName = generateRandomResourceName("snp-", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk emptyDisk = computeManager .disks() .define(emptyDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .withSizeInGB(100) .create(); Snapshot snapshot = computeManager .snapshots() .define(snapshotName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromDisk(emptyDisk) .withSizeInGB(200) .withSku(SnapshotSkuType.STANDARD_LRS) .create(); Assertions.assertNotNull(snapshot.id()); Assertions.assertTrue(snapshot.name().equalsIgnoreCase(snapshotName)); Assertions.assertEquals(snapshot.skuType().toString(), DiskSkuTypes.STANDARD_LRS.toString()); Assertions.assertEquals(snapshot.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(snapshot.sizeInGB(), 200); Assertions.assertNull(snapshot.osType()); Assertions.assertNotNull(snapshot.source()); 
Assertions.assertEquals(snapshot.source().type(), CreationSourceType.COPIED_FROM_DISK); Assertions.assertTrue(snapshot.source().sourceId().equalsIgnoreCase(emptyDisk.id())); Disk fromSnapshotDisk = computeManager .disks() .define(snapshotBasedDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .fromSnapshot(snapshot) .withSizeInGB(300) .create(); Assertions.assertNotNull(fromSnapshotDisk.id()); Assertions.assertTrue(fromSnapshotDisk.name().equalsIgnoreCase(snapshotBasedDiskName)); Assertions.assertEquals(fromSnapshotDisk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(fromSnapshotDisk.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(fromSnapshotDisk.sizeInGB(), 300); Assertions.assertNull(fromSnapshotDisk.osType()); Assertions.assertNotNull(fromSnapshotDisk.source()); Assertions.assertEquals(fromSnapshotDisk.source().type(), CreationSourceType.COPIED_FROM_SNAPSHOT); Assertions.assertTrue(fromSnapshotDisk.source().sourceId().equalsIgnoreCase(snapshot.id())); } @DoNotRecord(skipInPlayback = true) @Test public void canCopyStartIncrementalSnapshot() { rgName2 = generateRandomResourceName("rg", 15); final String emptyDiskName = generateRandomResourceName("md-empty-", 20); final String snapshotName = generateRandomResourceName("snp-", 20); final String snapshotName2 = generateRandomResourceName("snp-", 20); final String newRegionSnapshotName = generateRandomResourceName("snp-newregion-", 20); final String snapshotBasedDiskName = generateRandomResourceName("md-snp-newregion-", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); ResourceGroup resourceGroup2 = resourceManager.resourceGroups().define(rgName2).withRegion(region2).create(); Disk emptyDisk = computeManager .disks() .define(emptyDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .withSizeInGB(100) .create(); Snapshot snapshot = computeManager .snapshots() 
.define(snapshotName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromDisk(emptyDisk) .withSku(SnapshotSkuType.STANDARD_LRS) .withIncremental(true) .create(); Assertions.assertTrue(snapshot.incremental()); Assertions.assertEquals(CreationSourceType.COPIED_FROM_DISK, snapshot.source().type()); Assertions.assertEquals(DiskCreateOption.COPY, snapshot.creationMethod()); Assertions.assertThrows(IllegalStateException.class, snapshot::awaitCopyStartCompletion); Snapshot snapshotSameRegion = computeManager .snapshots() .define(snapshotName2) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); Assertions.assertTrue(snapshotSameRegion.incremental()); Assertions.assertEquals(CreationSourceType.COPIED_FROM_SNAPSHOT, snapshotSameRegion.source().type()); Assertions.assertEquals(DiskCreateOption.COPY_START, snapshotSameRegion.creationMethod()); Assertions.assertNull(snapshotSameRegion.copyCompletionError()); Assertions.assertNotEquals(100, snapshotSameRegion.copyCompletionPercent()); computeManager .snapshots() .deleteById(snapshotSameRegion.id()); Snapshot snapshotSameRegion2 = computeManager .snapshots() .define(snapshotName2) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); Assertions.assertFalse(!isPlaybackMode() && snapshotSameRegion2.awaitCopyStartCompletion(Duration.ofMillis(1))); Assertions.assertTrue(snapshotSameRegion2.awaitCopyStartCompletion(Duration.ofHours(24))); Snapshot snapshotNewRegion = computeManager .snapshots() .define(newRegionSnapshotName) .withRegion(region2) .withExistingResourceGroup(resourceGroup2) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); snapshotNewRegion.awaitCopyStartCompletion(); Assertions.assertTrue(snapshotNewRegion.incremental()); 
Assertions.assertEquals(CreationSourceType.COPIED_FROM_SNAPSHOT, snapshotNewRegion.source().type()); Assertions.assertEquals(DiskCreateOption.COPY_START, snapshotNewRegion.creationMethod()); Assertions.assertEquals(100, snapshotNewRegion.copyCompletionPercent()); Assertions.assertNull(snapshotNewRegion.copyCompletionError()); Disk fromSnapshotDisk = computeManager .disks() .define(snapshotBasedDiskName) .withRegion(region2) .withExistingResourceGroup(resourceGroup2) .withData() .fromSnapshot(snapshotNewRegion) .withSizeInGB(300) .create(); Assertions.assertNotNull(fromSnapshotDisk.id()); Assertions.assertTrue(fromSnapshotDisk.name().equalsIgnoreCase(snapshotBasedDiskName)); Assertions.assertEquals(fromSnapshotDisk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(fromSnapshotDisk.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(fromSnapshotDisk.sizeInGB(), 300); Assertions.assertNull(fromSnapshotDisk.osType()); Assertions.assertNotNull(fromSnapshotDisk.source()); Assertions.assertEquals(fromSnapshotDisk.source().type(), CreationSourceType.COPIED_FROM_SNAPSHOT); Assertions.assertTrue(fromSnapshotDisk.source().sourceId().equalsIgnoreCase(snapshotNewRegion.id())); } @Test public void canCreateWithLogicalSectorSize() { String diskName = generateRandomResourceName("disk", 15); Disk defaultDisk = computeManager .disks() .define("default_disk") .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withData() .withSizeInGB(1) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); defaultDisk.refresh(); Assertions.assertNull(defaultDisk.logicalSectorSizeInBytes()); Disk disk = computeManager .disks() .define(diskName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10) .withSku(DiskSkuTypes.PREMIUM_V2_LRS) .withLogicalSectorSizeInBytes(512) .create(); disk.refresh(); Assertions.assertEquals(DiskSkuTypes.PREMIUM_V2_LRS, disk.sku()); Assertions.assertEquals(512, disk.logicalSectorSizeInBytes()); } @Test }
class ManagedDiskOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = null; private Region region = Region.US_WEST_CENTRAL; private Region region2 = Region.US_EAST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); if (rgName2 != null) { resourceManager.resourceGroups().beginDeleteByName(rgName2); } } @Test public void canOperateOnEmptyManagedDisk() { final String diskName = generateRandomResourceName("md-empty-", 20); final DiskSkuTypes updateTo = DiskSkuTypes.STANDARD_LRS; ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk disk = computeManager .disks() .define(diskName) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withSizeInGB(100) .withSku(DiskSkuTypes.STANDARD_LRS) .withTag("tkey1", "tval1") .create(); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.EMPTY); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 100); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.EMPTY); Assertions.assertNull(disk.source().sourceId()); disk = disk.update().withSku(updateTo).withSizeInGB(200).apply(); Assertions.assertEquals(disk.sku(), updateTo); Assertions.assertEquals(disk.sizeInGB(), 200); disk = computeManager.disks().getByResourceGroup(disk.resourceGroupName(), disk.name()); Assertions.assertNotNull(disk); PagedIterable<Disk> myDisks = 
computeManager.disks().listByResourceGroup(disk.resourceGroupName()); Assertions.assertNotNull(myDisks); Assertions.assertTrue(TestUtilities.getSize(myDisks) > 0); String sasUrl = disk.grantAccess(100); Assertions.assertTrue(sasUrl != null && sasUrl != ""); disk.revokeAccess(); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromDisk() { final String diskName1 = generateRandomResourceName("md-1", 20); final String diskName2 = generateRandomResourceName("md-2", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk emptyDisk = computeManager .disks() .define(diskName1) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withSizeInGB(100) .create(); Disk disk = computeManager .disks() .define(diskName2) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .fromDisk(emptyDisk) .withSizeInGB(200) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); disk = computeManager.disks().getById(disk.id()); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName2)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.COPY); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 200); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.COPIED_FROM_DISK); Assertions.assertTrue(disk.source().sourceId().equalsIgnoreCase(emptyDisk.id())); computeManager.disks().deleteById(emptyDisk.id()); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromUpload() { final String diskName = generateRandomResourceName("md-2", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk disk = 
computeManager .disks() .define(diskName) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withUploadSizeInMB(1000) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); disk = computeManager.disks().getById(disk.id()); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.UPLOAD); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 0); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.UNKNOWN); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromSnapshot() { final String emptyDiskName = generateRandomResourceName("md-empty-", 20); final String snapshotBasedDiskName = generateRandomResourceName("md-snp-", 20); final String snapshotName = generateRandomResourceName("snp-", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk emptyDisk = computeManager .disks() .define(emptyDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .withSizeInGB(100) .create(); Snapshot snapshot = computeManager .snapshots() .define(snapshotName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromDisk(emptyDisk) .withSizeInGB(200) .withSku(SnapshotSkuType.STANDARD_LRS) .create(); Assertions.assertNotNull(snapshot.id()); Assertions.assertTrue(snapshot.name().equalsIgnoreCase(snapshotName)); Assertions.assertEquals(snapshot.skuType().toString(), DiskSkuTypes.STANDARD_LRS.toString()); Assertions.assertEquals(snapshot.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(snapshot.sizeInGB(), 200); Assertions.assertNull(snapshot.osType()); Assertions.assertNotNull(snapshot.source()); 
Assertions.assertEquals(snapshot.source().type(), CreationSourceType.COPIED_FROM_DISK); Assertions.assertTrue(snapshot.source().sourceId().equalsIgnoreCase(emptyDisk.id())); Disk fromSnapshotDisk = computeManager .disks() .define(snapshotBasedDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .fromSnapshot(snapshot) .withSizeInGB(300) .create(); Assertions.assertNotNull(fromSnapshotDisk.id()); Assertions.assertTrue(fromSnapshotDisk.name().equalsIgnoreCase(snapshotBasedDiskName)); Assertions.assertEquals(fromSnapshotDisk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(fromSnapshotDisk.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(fromSnapshotDisk.sizeInGB(), 300); Assertions.assertNull(fromSnapshotDisk.osType()); Assertions.assertNotNull(fromSnapshotDisk.source()); Assertions.assertEquals(fromSnapshotDisk.source().type(), CreationSourceType.COPIED_FROM_SNAPSHOT); Assertions.assertTrue(fromSnapshotDisk.source().sourceId().equalsIgnoreCase(snapshot.id())); } @DoNotRecord(skipInPlayback = true) @Test public void canCopyStartIncrementalSnapshot() { rgName2 = generateRandomResourceName("rg", 15); final String emptyDiskName = generateRandomResourceName("md-empty-", 20); final String snapshotName = generateRandomResourceName("snp-", 20); final String snapshotName2 = generateRandomResourceName("snp-", 20); final String newRegionSnapshotName = generateRandomResourceName("snp-newregion-", 20); final String snapshotBasedDiskName = generateRandomResourceName("md-snp-newregion-", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); ResourceGroup resourceGroup2 = resourceManager.resourceGroups().define(rgName2).withRegion(region2).create(); Disk emptyDisk = computeManager .disks() .define(emptyDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .withSizeInGB(100) .create(); Snapshot snapshot = computeManager .snapshots() 
.define(snapshotName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromDisk(emptyDisk) .withSku(SnapshotSkuType.STANDARD_LRS) .withIncremental(true) .create(); Assertions.assertTrue(snapshot.incremental()); Assertions.assertEquals(CreationSourceType.COPIED_FROM_DISK, snapshot.source().type()); Assertions.assertEquals(DiskCreateOption.COPY, snapshot.creationMethod()); Assertions.assertThrows(IllegalStateException.class, snapshot::awaitCopyStartCompletion); Snapshot snapshotSameRegion = computeManager .snapshots() .define(snapshotName2) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); Assertions.assertTrue(snapshotSameRegion.incremental()); Assertions.assertEquals(CreationSourceType.COPIED_FROM_SNAPSHOT, snapshotSameRegion.source().type()); Assertions.assertEquals(DiskCreateOption.COPY_START, snapshotSameRegion.creationMethod()); Assertions.assertNull(snapshotSameRegion.copyCompletionError()); Assertions.assertNotEquals(100, snapshotSameRegion.copyCompletionPercent()); computeManager .snapshots() .deleteById(snapshotSameRegion.id()); Snapshot snapshotSameRegion2 = computeManager .snapshots() .define(snapshotName2) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); Assertions.assertFalse(!isPlaybackMode() && snapshotSameRegion2.awaitCopyStartCompletion(Duration.ofMillis(1))); Assertions.assertTrue(snapshotSameRegion2.awaitCopyStartCompletion(Duration.ofHours(24))); Snapshot snapshotNewRegion = computeManager .snapshots() .define(newRegionSnapshotName) .withRegion(region2) .withExistingResourceGroup(resourceGroup2) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); snapshotNewRegion.awaitCopyStartCompletion(); Assertions.assertTrue(snapshotNewRegion.incremental()); 
Assertions.assertEquals(CreationSourceType.COPIED_FROM_SNAPSHOT, snapshotNewRegion.source().type()); Assertions.assertEquals(DiskCreateOption.COPY_START, snapshotNewRegion.creationMethod()); Assertions.assertEquals(100, snapshotNewRegion.copyCompletionPercent()); Assertions.assertNull(snapshotNewRegion.copyCompletionError()); Disk fromSnapshotDisk = computeManager .disks() .define(snapshotBasedDiskName) .withRegion(region2) .withExistingResourceGroup(resourceGroup2) .withData() .fromSnapshot(snapshotNewRegion) .withSizeInGB(300) .create(); Assertions.assertNotNull(fromSnapshotDisk.id()); Assertions.assertTrue(fromSnapshotDisk.name().equalsIgnoreCase(snapshotBasedDiskName)); Assertions.assertEquals(fromSnapshotDisk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(fromSnapshotDisk.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(fromSnapshotDisk.sizeInGB(), 300); Assertions.assertNull(fromSnapshotDisk.osType()); Assertions.assertNotNull(fromSnapshotDisk.source()); Assertions.assertEquals(fromSnapshotDisk.source().type(), CreationSourceType.COPIED_FROM_SNAPSHOT); Assertions.assertTrue(fromSnapshotDisk.source().sourceId().equalsIgnoreCase(snapshotNewRegion.id())); } @Test public void canCreateWithLogicalSectorSize() { String diskName = generateRandomResourceName("disk", 15); Disk defaultDisk = computeManager .disks() .define("default_disk") .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withData() .withSizeInGB(1) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); defaultDisk.refresh(); Assertions.assertNull(defaultDisk.logicalSectorSizeInBytes()); Disk disk = computeManager .disks() .define(diskName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10) .withSku(DiskSkuTypes.PREMIUM_V2_LRS) .withLogicalSectorSizeInBytes(512) .create(); disk.refresh(); Assertions.assertEquals(DiskSkuTypes.PREMIUM_V2_LRS, disk.sku()); Assertions.assertEquals(512, disk.logicalSectorSizeInBytes()); } @Test }
This has been fixed in the new version.
public void canCreateAndUpdateManagedDiskWithHyperVGeneration() {
    // Create an empty 1 GB data disk pinned to Hyper-V generation V1.
    Disk disk =
        computeManager
            .disks()
            .define(generateRandomResourceName("disk", 15))
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withData()
            .withSizeInGB(1)
            .withSku(DiskSkuTypes.STANDARD_LRS)
            .withHyperVGeneration(HyperVGeneration.V1)
            .create();
    disk.refresh();
    // JUnit convention: expected value first, actual second — otherwise failure
    // messages report the values backwards.
    Assertions.assertEquals(HyperVGeneration.V1, disk.hyperVGeneration());

    // Update to generation V2 and verify the change round-trips through the service.
    disk.update().withHyperVGeneration(HyperVGeneration.V2).apply();
    disk.refresh();
    Assertions.assertEquals(HyperVGeneration.V2, disk.hyperVGeneration());
}
public void canCreateAndUpdateManagedDiskWithHyperVGeneration() {
    // Create an empty 1 GB data disk pinned to Hyper-V generation V1.
    Disk disk =
        computeManager
            .disks()
            .define(generateRandomResourceName("disk", 15))
            .withRegion(Region.US_EAST)
            .withNewResourceGroup(rgName)
            .withData()
            .withSizeInGB(1)
            .withSku(DiskSkuTypes.STANDARD_LRS)
            .withHyperVGeneration(HyperVGeneration.V1)
            .create();
    disk.refresh();
    // JUnit convention: expected value first, actual second — otherwise failure
    // messages report the values backwards.
    Assertions.assertEquals(HyperVGeneration.V1, disk.hyperVGeneration());

    // Update to generation V2 and verify the change round-trips through the service.
    disk.update().withHyperVGeneration(HyperVGeneration.V2).apply();
    disk.refresh();
    Assertions.assertEquals(HyperVGeneration.V2, disk.hyperVGeneration());
}
class ManagedDiskOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = null; private Region region = Region.US_WEST_CENTRAL; private Region region2 = Region.US_EAST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); if (rgName2 != null) { resourceManager.resourceGroups().beginDeleteByName(rgName2); } } @Test public void canOperateOnEmptyManagedDisk() { final String diskName = generateRandomResourceName("md-empty-", 20); final DiskSkuTypes updateTo = DiskSkuTypes.STANDARD_LRS; ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk disk = computeManager .disks() .define(diskName) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withSizeInGB(100) .withSku(DiskSkuTypes.STANDARD_LRS) .withTag("tkey1", "tval1") .create(); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.EMPTY); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 100); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.EMPTY); Assertions.assertNull(disk.source().sourceId()); disk = disk.update().withSku(updateTo).withSizeInGB(200).apply(); Assertions.assertEquals(disk.sku(), updateTo); Assertions.assertEquals(disk.sizeInGB(), 200); disk = computeManager.disks().getByResourceGroup(disk.resourceGroupName(), disk.name()); Assertions.assertNotNull(disk); PagedIterable<Disk> myDisks = 
computeManager.disks().listByResourceGroup(disk.resourceGroupName()); Assertions.assertNotNull(myDisks); Assertions.assertTrue(TestUtilities.getSize(myDisks) > 0); String sasUrl = disk.grantAccess(100); Assertions.assertTrue(sasUrl != null && sasUrl != ""); disk.revokeAccess(); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromDisk() { final String diskName1 = generateRandomResourceName("md-1", 20); final String diskName2 = generateRandomResourceName("md-2", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk emptyDisk = computeManager .disks() .define(diskName1) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withSizeInGB(100) .create(); Disk disk = computeManager .disks() .define(diskName2) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .fromDisk(emptyDisk) .withSizeInGB(200) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); disk = computeManager.disks().getById(disk.id()); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName2)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.COPY); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 200); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.COPIED_FROM_DISK); Assertions.assertTrue(disk.source().sourceId().equalsIgnoreCase(emptyDisk.id())); computeManager.disks().deleteById(emptyDisk.id()); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromUpload() { final String diskName = generateRandomResourceName("md-2", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk disk = 
computeManager .disks() .define(diskName) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withUploadSizeInMB(1000) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); disk = computeManager.disks().getById(disk.id()); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.UPLOAD); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 0); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.UNKNOWN); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromSnapshot() { final String emptyDiskName = generateRandomResourceName("md-empty-", 20); final String snapshotBasedDiskName = generateRandomResourceName("md-snp-", 20); final String snapshotName = generateRandomResourceName("snp-", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk emptyDisk = computeManager .disks() .define(emptyDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .withSizeInGB(100) .create(); Snapshot snapshot = computeManager .snapshots() .define(snapshotName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromDisk(emptyDisk) .withSizeInGB(200) .withSku(SnapshotSkuType.STANDARD_LRS) .create(); Assertions.assertNotNull(snapshot.id()); Assertions.assertTrue(snapshot.name().equalsIgnoreCase(snapshotName)); Assertions.assertEquals(snapshot.skuType().toString(), DiskSkuTypes.STANDARD_LRS.toString()); Assertions.assertEquals(snapshot.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(snapshot.sizeInGB(), 200); Assertions.assertNull(snapshot.osType()); Assertions.assertNotNull(snapshot.source()); 
Assertions.assertEquals(snapshot.source().type(), CreationSourceType.COPIED_FROM_DISK); Assertions.assertTrue(snapshot.source().sourceId().equalsIgnoreCase(emptyDisk.id())); Disk fromSnapshotDisk = computeManager .disks() .define(snapshotBasedDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .fromSnapshot(snapshot) .withSizeInGB(300) .create(); Assertions.assertNotNull(fromSnapshotDisk.id()); Assertions.assertTrue(fromSnapshotDisk.name().equalsIgnoreCase(snapshotBasedDiskName)); Assertions.assertEquals(fromSnapshotDisk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(fromSnapshotDisk.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(fromSnapshotDisk.sizeInGB(), 300); Assertions.assertNull(fromSnapshotDisk.osType()); Assertions.assertNotNull(fromSnapshotDisk.source()); Assertions.assertEquals(fromSnapshotDisk.source().type(), CreationSourceType.COPIED_FROM_SNAPSHOT); Assertions.assertTrue(fromSnapshotDisk.source().sourceId().equalsIgnoreCase(snapshot.id())); } @DoNotRecord(skipInPlayback = true) @Test public void canCopyStartIncrementalSnapshot() { rgName2 = generateRandomResourceName("rg", 15); final String emptyDiskName = generateRandomResourceName("md-empty-", 20); final String snapshotName = generateRandomResourceName("snp-", 20); final String snapshotName2 = generateRandomResourceName("snp-", 20); final String newRegionSnapshotName = generateRandomResourceName("snp-newregion-", 20); final String snapshotBasedDiskName = generateRandomResourceName("md-snp-newregion-", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); ResourceGroup resourceGroup2 = resourceManager.resourceGroups().define(rgName2).withRegion(region2).create(); Disk emptyDisk = computeManager .disks() .define(emptyDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .withSizeInGB(100) .create(); Snapshot snapshot = computeManager .snapshots() 
.define(snapshotName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromDisk(emptyDisk) .withSku(SnapshotSkuType.STANDARD_LRS) .withIncremental(true) .create(); Assertions.assertTrue(snapshot.incremental()); Assertions.assertEquals(CreationSourceType.COPIED_FROM_DISK, snapshot.source().type()); Assertions.assertEquals(DiskCreateOption.COPY, snapshot.creationMethod()); Assertions.assertThrows(IllegalStateException.class, snapshot::awaitCopyStartCompletion); Snapshot snapshotSameRegion = computeManager .snapshots() .define(snapshotName2) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); Assertions.assertTrue(snapshotSameRegion.incremental()); Assertions.assertEquals(CreationSourceType.COPIED_FROM_SNAPSHOT, snapshotSameRegion.source().type()); Assertions.assertEquals(DiskCreateOption.COPY_START, snapshotSameRegion.creationMethod()); Assertions.assertNull(snapshotSameRegion.copyCompletionError()); Assertions.assertNotEquals(100, snapshotSameRegion.copyCompletionPercent()); computeManager .snapshots() .deleteById(snapshotSameRegion.id()); Snapshot snapshotSameRegion2 = computeManager .snapshots() .define(snapshotName2) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); Assertions.assertFalse(!isPlaybackMode() && snapshotSameRegion2.awaitCopyStartCompletion(Duration.ofMillis(1))); Assertions.assertTrue(snapshotSameRegion2.awaitCopyStartCompletion(Duration.ofHours(24))); Snapshot snapshotNewRegion = computeManager .snapshots() .define(newRegionSnapshotName) .withRegion(region2) .withExistingResourceGroup(resourceGroup2) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); snapshotNewRegion.awaitCopyStartCompletion(); Assertions.assertTrue(snapshotNewRegion.incremental()); 
Assertions.assertEquals(CreationSourceType.COPIED_FROM_SNAPSHOT, snapshotNewRegion.source().type()); Assertions.assertEquals(DiskCreateOption.COPY_START, snapshotNewRegion.creationMethod()); Assertions.assertEquals(100, snapshotNewRegion.copyCompletionPercent()); Assertions.assertNull(snapshotNewRegion.copyCompletionError()); Disk fromSnapshotDisk = computeManager .disks() .define(snapshotBasedDiskName) .withRegion(region2) .withExistingResourceGroup(resourceGroup2) .withData() .fromSnapshot(snapshotNewRegion) .withSizeInGB(300) .create(); Assertions.assertNotNull(fromSnapshotDisk.id()); Assertions.assertTrue(fromSnapshotDisk.name().equalsIgnoreCase(snapshotBasedDiskName)); Assertions.assertEquals(fromSnapshotDisk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(fromSnapshotDisk.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(fromSnapshotDisk.sizeInGB(), 300); Assertions.assertNull(fromSnapshotDisk.osType()); Assertions.assertNotNull(fromSnapshotDisk.source()); Assertions.assertEquals(fromSnapshotDisk.source().type(), CreationSourceType.COPIED_FROM_SNAPSHOT); Assertions.assertTrue(fromSnapshotDisk.source().sourceId().equalsIgnoreCase(snapshotNewRegion.id())); } @Test public void canCreateWithLogicalSectorSize() { String diskName = generateRandomResourceName("disk", 15); Disk defaultDisk = computeManager .disks() .define("default_disk") .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withData() .withSizeInGB(1) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); defaultDisk.refresh(); Assertions.assertNull(defaultDisk.logicalSectorSizeInBytes()); Disk disk = computeManager .disks() .define(diskName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10) .withSku(DiskSkuTypes.PREMIUM_V2_LRS) .withLogicalSectorSizeInBytes(512) .create(); disk.refresh(); Assertions.assertEquals(DiskSkuTypes.PREMIUM_V2_LRS, disk.sku()); Assertions.assertEquals(512, disk.logicalSectorSizeInBytes()); } @Test }
class ManagedDiskOperationsTests extends ComputeManagementTest { private String rgName = ""; private String rgName2 = null; private Region region = Region.US_WEST_CENTRAL; private Region region2 = Region.US_EAST; @Override protected void initializeClients(HttpPipeline httpPipeline, AzureProfile profile) { rgName = generateRandomResourceName("javacsmrg", 15); super.initializeClients(httpPipeline, profile); } @Override protected void cleanUpResources() { resourceManager.resourceGroups().beginDeleteByName(rgName); if (rgName2 != null) { resourceManager.resourceGroups().beginDeleteByName(rgName2); } } @Test public void canOperateOnEmptyManagedDisk() { final String diskName = generateRandomResourceName("md-empty-", 20); final DiskSkuTypes updateTo = DiskSkuTypes.STANDARD_LRS; ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk disk = computeManager .disks() .define(diskName) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withSizeInGB(100) .withSku(DiskSkuTypes.STANDARD_LRS) .withTag("tkey1", "tval1") .create(); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.EMPTY); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 100); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.EMPTY); Assertions.assertNull(disk.source().sourceId()); disk = disk.update().withSku(updateTo).withSizeInGB(200).apply(); Assertions.assertEquals(disk.sku(), updateTo); Assertions.assertEquals(disk.sizeInGB(), 200); disk = computeManager.disks().getByResourceGroup(disk.resourceGroupName(), disk.name()); Assertions.assertNotNull(disk); PagedIterable<Disk> myDisks = 
computeManager.disks().listByResourceGroup(disk.resourceGroupName()); Assertions.assertNotNull(myDisks); Assertions.assertTrue(TestUtilities.getSize(myDisks) > 0); String sasUrl = disk.grantAccess(100); Assertions.assertTrue(sasUrl != null && sasUrl != ""); disk.revokeAccess(); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromDisk() { final String diskName1 = generateRandomResourceName("md-1", 20); final String diskName2 = generateRandomResourceName("md-2", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk emptyDisk = computeManager .disks() .define(diskName1) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withSizeInGB(100) .create(); Disk disk = computeManager .disks() .define(diskName2) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .fromDisk(emptyDisk) .withSizeInGB(200) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); disk = computeManager.disks().getById(disk.id()); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName2)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.COPY); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 200); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.COPIED_FROM_DISK); Assertions.assertTrue(disk.source().sourceId().equalsIgnoreCase(emptyDisk.id())); computeManager.disks().deleteById(emptyDisk.id()); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromUpload() { final String diskName = generateRandomResourceName("md-2", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk disk = 
computeManager .disks() .define(diskName) .withRegion(region) .withExistingResourceGroup(resourceGroup.name()) .withData() .withUploadSizeInMB(1000) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); disk = computeManager.disks().getById(disk.id()); Assertions.assertNotNull(disk.id()); Assertions.assertTrue(disk.name().equalsIgnoreCase(diskName)); Assertions.assertEquals(disk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(disk.creationMethod(), DiskCreateOption.UPLOAD); Assertions.assertFalse(disk.isAttachedToVirtualMachine()); Assertions.assertEquals(disk.sizeInGB(), 0); Assertions.assertNull(disk.osType()); Assertions.assertNotNull(disk.source()); Assertions.assertEquals(disk.source().type(), CreationSourceType.UNKNOWN); computeManager.disks().deleteById(disk.id()); } @Test public void canOperateOnManagedDiskFromSnapshot() { final String emptyDiskName = generateRandomResourceName("md-empty-", 20); final String snapshotBasedDiskName = generateRandomResourceName("md-snp-", 20); final String snapshotName = generateRandomResourceName("snp-", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); Disk emptyDisk = computeManager .disks() .define(emptyDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .withSizeInGB(100) .create(); Snapshot snapshot = computeManager .snapshots() .define(snapshotName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromDisk(emptyDisk) .withSizeInGB(200) .withSku(SnapshotSkuType.STANDARD_LRS) .create(); Assertions.assertNotNull(snapshot.id()); Assertions.assertTrue(snapshot.name().equalsIgnoreCase(snapshotName)); Assertions.assertEquals(snapshot.skuType().toString(), DiskSkuTypes.STANDARD_LRS.toString()); Assertions.assertEquals(snapshot.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(snapshot.sizeInGB(), 200); Assertions.assertNull(snapshot.osType()); Assertions.assertNotNull(snapshot.source()); 
Assertions.assertEquals(snapshot.source().type(), CreationSourceType.COPIED_FROM_DISK); Assertions.assertTrue(snapshot.source().sourceId().equalsIgnoreCase(emptyDisk.id())); Disk fromSnapshotDisk = computeManager .disks() .define(snapshotBasedDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .fromSnapshot(snapshot) .withSizeInGB(300) .create(); Assertions.assertNotNull(fromSnapshotDisk.id()); Assertions.assertTrue(fromSnapshotDisk.name().equalsIgnoreCase(snapshotBasedDiskName)); Assertions.assertEquals(fromSnapshotDisk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(fromSnapshotDisk.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(fromSnapshotDisk.sizeInGB(), 300); Assertions.assertNull(fromSnapshotDisk.osType()); Assertions.assertNotNull(fromSnapshotDisk.source()); Assertions.assertEquals(fromSnapshotDisk.source().type(), CreationSourceType.COPIED_FROM_SNAPSHOT); Assertions.assertTrue(fromSnapshotDisk.source().sourceId().equalsIgnoreCase(snapshot.id())); } @DoNotRecord(skipInPlayback = true) @Test public void canCopyStartIncrementalSnapshot() { rgName2 = generateRandomResourceName("rg", 15); final String emptyDiskName = generateRandomResourceName("md-empty-", 20); final String snapshotName = generateRandomResourceName("snp-", 20); final String snapshotName2 = generateRandomResourceName("snp-", 20); final String newRegionSnapshotName = generateRandomResourceName("snp-newregion-", 20); final String snapshotBasedDiskName = generateRandomResourceName("md-snp-newregion-", 20); ResourceGroup resourceGroup = resourceManager.resourceGroups().define(rgName).withRegion(region).create(); ResourceGroup resourceGroup2 = resourceManager.resourceGroups().define(rgName2).withRegion(region2).create(); Disk emptyDisk = computeManager .disks() .define(emptyDiskName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withData() .withSizeInGB(100) .create(); Snapshot snapshot = computeManager .snapshots() 
.define(snapshotName) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromDisk(emptyDisk) .withSku(SnapshotSkuType.STANDARD_LRS) .withIncremental(true) .create(); Assertions.assertTrue(snapshot.incremental()); Assertions.assertEquals(CreationSourceType.COPIED_FROM_DISK, snapshot.source().type()); Assertions.assertEquals(DiskCreateOption.COPY, snapshot.creationMethod()); Assertions.assertThrows(IllegalStateException.class, snapshot::awaitCopyStartCompletion); Snapshot snapshotSameRegion = computeManager .snapshots() .define(snapshotName2) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); Assertions.assertTrue(snapshotSameRegion.incremental()); Assertions.assertEquals(CreationSourceType.COPIED_FROM_SNAPSHOT, snapshotSameRegion.source().type()); Assertions.assertEquals(DiskCreateOption.COPY_START, snapshotSameRegion.creationMethod()); Assertions.assertNull(snapshotSameRegion.copyCompletionError()); Assertions.assertNotEquals(100, snapshotSameRegion.copyCompletionPercent()); computeManager .snapshots() .deleteById(snapshotSameRegion.id()); Snapshot snapshotSameRegion2 = computeManager .snapshots() .define(snapshotName2) .withRegion(region) .withExistingResourceGroup(resourceGroup) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); Assertions.assertFalse(!isPlaybackMode() && snapshotSameRegion2.awaitCopyStartCompletion(Duration.ofMillis(1))); Assertions.assertTrue(snapshotSameRegion2.awaitCopyStartCompletion(Duration.ofHours(24))); Snapshot snapshotNewRegion = computeManager .snapshots() .define(newRegionSnapshotName) .withRegion(region2) .withExistingResourceGroup(resourceGroup2) .withDataFromSnapshot(snapshot) .withCopyStart() .withIncremental(true) .create(); snapshotNewRegion.awaitCopyStartCompletion(); Assertions.assertTrue(snapshotNewRegion.incremental()); 
Assertions.assertEquals(CreationSourceType.COPIED_FROM_SNAPSHOT, snapshotNewRegion.source().type()); Assertions.assertEquals(DiskCreateOption.COPY_START, snapshotNewRegion.creationMethod()); Assertions.assertEquals(100, snapshotNewRegion.copyCompletionPercent()); Assertions.assertNull(snapshotNewRegion.copyCompletionError()); Disk fromSnapshotDisk = computeManager .disks() .define(snapshotBasedDiskName) .withRegion(region2) .withExistingResourceGroup(resourceGroup2) .withData() .fromSnapshot(snapshotNewRegion) .withSizeInGB(300) .create(); Assertions.assertNotNull(fromSnapshotDisk.id()); Assertions.assertTrue(fromSnapshotDisk.name().equalsIgnoreCase(snapshotBasedDiskName)); Assertions.assertEquals(fromSnapshotDisk.sku(), DiskSkuTypes.STANDARD_LRS); Assertions.assertEquals(fromSnapshotDisk.creationMethod(), DiskCreateOption.COPY); Assertions.assertEquals(fromSnapshotDisk.sizeInGB(), 300); Assertions.assertNull(fromSnapshotDisk.osType()); Assertions.assertNotNull(fromSnapshotDisk.source()); Assertions.assertEquals(fromSnapshotDisk.source().type(), CreationSourceType.COPIED_FROM_SNAPSHOT); Assertions.assertTrue(fromSnapshotDisk.source().sourceId().equalsIgnoreCase(snapshotNewRegion.id())); } @Test public void canCreateWithLogicalSectorSize() { String diskName = generateRandomResourceName("disk", 15); Disk defaultDisk = computeManager .disks() .define("default_disk") .withRegion(Region.US_EAST) .withNewResourceGroup(rgName) .withData() .withSizeInGB(1) .withSku(DiskSkuTypes.STANDARD_LRS) .create(); defaultDisk.refresh(); Assertions.assertNull(defaultDisk.logicalSectorSizeInBytes()); Disk disk = computeManager .disks() .define(diskName) .withRegion(Region.US_EAST) .withExistingResourceGroup(rgName) .withData() .withSizeInGB(10) .withSku(DiskSkuTypes.PREMIUM_V2_LRS) .withLogicalSectorSizeInBytes(512) .create(); disk.refresh(); Assertions.assertEquals(DiskSkuTypes.PREMIUM_V2_LRS, disk.sku()); Assertions.assertEquals(512, disk.logicalSectorSizeInBytes()); } @Test }
```suggestion String errMessage = null; if ("basic".equalsIgnoreCase(pricingTier)) { errMessage = "The basic tier is not supported by Service Bus JMS. Please use standard or premium tier instead."; } else { errMessage = "'spring.jms.servicebus.pricing-tier' is not valid."; } ```
public void afterPropertiesSet() throws Exception { if (isPasswordlessEnabled()) { if (!StringUtils.hasText(namespace)) { throw new IllegalArgumentException("Passwordless connections enabled, 'spring.jms.servicebus.namespace' should be provided."); } } else { if (!StringUtils.hasText(connectionString)) { throw new IllegalArgumentException("'spring.jms.servicebus.connection-string' should be provided."); } } if (null == pricingTier || !pricingTier.matches("(?i)premium|standard")) { String errMessage = "'spring.jms.servicebus.pricing-tier' is not valid."; if ("basic".equalsIgnoreCase(pricingTier)) { errMessage = "The basic tier is not supported by Service Bus JMS. Please use standard or premium tier instead."; } throw new IllegalArgumentException(errMessage); } }
}
public void afterPropertiesSet() throws Exception { if (isPasswordlessEnabled()) { if (!StringUtils.hasText(namespace)) { throw new IllegalArgumentException("Passwordless connections enabled, 'spring.jms.servicebus.namespace' should be provided."); } } else { if (!StringUtils.hasText(connectionString)) { throw new IllegalArgumentException("'spring.jms.servicebus.connection-string' should be provided."); } } if (null == pricingTier || !pricingTier.matches("(?i)premium|standard")) { String errMessage = null; if ("basic".equalsIgnoreCase(pricingTier)) { errMessage = "The basic tier is not supported by Service Bus JMS. Please use standard or premium tier instead."; } else { errMessage = "'spring.jms.servicebus.pricing-tier' is not valid."; } throw new IllegalArgumentException(errMessage); } }
class AzureServiceBusJmsProperties implements InitializingBean, PasswordlessProperties { /** * Service Bus JMS properties prefix. */ public static final String PREFIX = "spring.jms.servicebus"; private static final String SERVICE_BUS_SCOPE_AZURE = "https: private static final String SERVICE_BUS_SCOPE_AZURE_CHINA = SERVICE_BUS_SCOPE_AZURE; private static final String SERVICE_BUS_SCOPE_AZURE_US_GOVERNMENT = SERVICE_BUS_SCOPE_AZURE; private static final Map<CloudType, String> SERVICEBUS_SCOPE_MAP = new HashMap<CloudType, String>() { { put(CloudType.AZURE, SERVICE_BUS_SCOPE_AZURE); put(CloudType.AZURE_CHINA, SERVICE_BUS_SCOPE_AZURE_CHINA); put(CloudType.AZURE_US_GOVERNMENT, SERVICE_BUS_SCOPE_AZURE_US_GOVERNMENT); } }; private AzureProfileConfigurationProperties profile = new AzureProfileConfigurationProperties(); /** * The scopes required for the access token. */ private String scopes; private TokenCredentialConfigurationProperties credential = new TokenCredentialConfigurationProperties(); /** * Whether to enable supporting azure identity token credentials. * * If the value is true, then 'spring.jms.servicebus.namespace' must be set. * If the passwordlessEnabled is true, it will try to authenticate connections with Azure AD. */ private boolean passwordlessEnabled = false; /** * Whether to enable Service Bus JMS autoconfiguration. */ private boolean enabled = true; /** * The Service Bus namespace. */ private String namespace; /** * Connection string to connect to a Service Bus namespace. */ private String connectionString; /** * Service Bus topic client ID. Only works for the bean of topicJmsListenerContainerFactory. */ private String topicClientId; /** * Connection idle timeout duration that how long the client expects Service Bus to keep a connection alive when no messages delivered. * @see <a href="http: * @see <a href="https: */ private Duration idleTimeout = Duration.ofMinutes(2); /** * Pricing tier for a Service Bus namespace. 
*/ private String pricingTier; private final Listener listener = new Listener(); private final PrefetchPolicy prefetchPolicy = new PrefetchPolicy(); @NestedConfigurationProperty private final JmsPoolConnectionFactoryProperties pool = new JmsPoolConnectionFactoryProperties(); public boolean isEnabled() { return enabled; } public void setEnabled(boolean enabled) { this.enabled = enabled; } public JmsPoolConnectionFactoryProperties getPool() { return pool; } public String getConnectionString() { return connectionString; } public void setConnectionString(String connectionString) { this.connectionString = connectionString; } public String getTopicClientId() { return topicClientId; } public void setTopicClientId(String topicClientId) { this.topicClientId = topicClientId; } public String getPricingTier() { return this.pricingTier; } public void setPricingTier(String pricingTier) { this.pricingTier = pricingTier; } public Duration getIdleTimeout() { return idleTimeout; } public void setIdleTimeout(Duration idleTimeout) { this.idleTimeout = idleTimeout; } public Listener getListener() { return listener; } public PrefetchPolicy getPrefetchPolicy() { return prefetchPolicy; } public String getNamespace() { return namespace; } public void setNamespace(String namespace) { this.namespace = namespace; } @Override public String getScopes() { return this.scopes == null ? 
getDefaultScopes() : this.scopes; } public void setScopes(String scopes) { this.scopes = scopes; } @Override public boolean isPasswordlessEnabled() { return passwordlessEnabled; } public void setPasswordlessEnabled(boolean passwordlessEnabled) { this.passwordlessEnabled = passwordlessEnabled; } @Override public AzureProfileConfigurationProperties getProfile() { return profile; } public void setProfile(AzureProfileConfigurationProperties profile) { this.profile = profile; } @Override public TokenCredentialConfigurationProperties getCredential() { return credential; } public void setCredential(TokenCredentialConfigurationProperties credential) { this.credential = credential; } @Override public static class PrefetchPolicy { /** * Fallback value for prefetch option in this Service Bus namespace. */ private int all = 0; /** * The number of prefetch for durable topic. */ private int durableTopicPrefetch = 0; /** * The number of prefetch for queue browser. */ private int queueBrowserPrefetch = 0; /** * The number of prefetch for queue. */ private int queuePrefetch = 0; /** * The number of prefetch for topic. */ private int topicPrefetch = 0; public int getAll() { return Math.max(all, 0); } public void setAll(int all) { this.all = all; } public int getDurableTopicPrefetch() { return durableTopicPrefetch > 0 ? durableTopicPrefetch : getAll(); } public void setDurableTopicPrefetch(int durableTopicPrefetch) { this.durableTopicPrefetch = durableTopicPrefetch; } public int getQueueBrowserPrefetch() { return queueBrowserPrefetch > 0 ? queueBrowserPrefetch : getAll(); } public void setQueueBrowserPrefetch(int queueBrowserPrefetch) { this.queueBrowserPrefetch = queueBrowserPrefetch; } public int getQueuePrefetch() { return queuePrefetch > 0 ? queuePrefetch : getAll(); } public void setQueuePrefetch(int queuePrefetch) { this.queuePrefetch = queuePrefetch; } public int getTopicPrefetch() { return topicPrefetch > 0 ? 
topicPrefetch : getAll(); } public void setTopicPrefetch(int topicPrefetch) { this.topicPrefetch = topicPrefetch; } } public static class Listener { /** * Whether the reply destination type is topic. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean replyPubSubDomain; /** * The QosSettings to use when sending a reply. */ private QosSettings replyQosSettings; /** * Whether to make the subscription durable. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionDurable = Boolean.TRUE; /** * Whether to make the subscription shared. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionShared; /** * The phase in which this container should be started and stopped. */ private Integer phase; public Boolean isReplyPubSubDomain() { return replyPubSubDomain; } public void setReplyPubSubDomain(Boolean replyPubSubDomain) { this.replyPubSubDomain = replyPubSubDomain; } public QosSettings getReplyQosSettings() { return replyQosSettings; } public void setReplyQosSettings(QosSettings replyQosSettings) { this.replyQosSettings = replyQosSettings; } public Boolean isSubscriptionDurable() { return subscriptionDurable; } public void setSubscriptionDurable(Boolean subscriptionDurable) { this.subscriptionDurable = subscriptionDurable; } public Boolean isSubscriptionShared() { return subscriptionShared; } public void setSubscriptionShared(Boolean subscriptionShared) { this.subscriptionShared = subscriptionShared; } public Integer getPhase() { return phase; } public void setPhase(Integer phase) { this.phase = phase; } } private String getDefaultScopes() { return SERVICEBUS_SCOPE_MAP.getOrDefault(getProfile().getCloudType(), SERVICE_BUS_SCOPE_AZURE); } }
class AzureServiceBusJmsProperties implements InitializingBean, PasswordlessProperties { /** * Service Bus JMS properties prefix. */ public static final String PREFIX = "spring.jms.servicebus"; private static final String SERVICE_BUS_SCOPE_AZURE = "https: private static final String SERVICE_BUS_SCOPE_AZURE_CHINA = SERVICE_BUS_SCOPE_AZURE; private static final String SERVICE_BUS_SCOPE_AZURE_US_GOVERNMENT = SERVICE_BUS_SCOPE_AZURE; private static final Map<CloudType, String> SERVICEBUS_SCOPE_MAP = new HashMap<CloudType, String>() { { put(CloudType.AZURE, SERVICE_BUS_SCOPE_AZURE); put(CloudType.AZURE_CHINA, SERVICE_BUS_SCOPE_AZURE_CHINA); put(CloudType.AZURE_US_GOVERNMENT, SERVICE_BUS_SCOPE_AZURE_US_GOVERNMENT); } }; private AzureProfileConfigurationProperties profile = new AzureProfileConfigurationProperties(); /** * The scopes required for the access token. */ private String scopes; private TokenCredentialConfigurationProperties credential = new TokenCredentialConfigurationProperties(); /** * Whether to enable supporting azure identity token credentials. * * If the value is true, then 'spring.jms.servicebus.namespace' must be set. * If the passwordlessEnabled is true, it will try to authenticate connections with Azure AD. */ private boolean passwordlessEnabled = false; /** * Whether to enable Service Bus JMS autoconfiguration. */ private boolean enabled = true; /** * The Service Bus namespace. */ private String namespace; /** * Connection string to connect to a Service Bus namespace. */ private String connectionString; /** * Service Bus topic client ID. Only works for the bean of topicJmsListenerContainerFactory. */ private String topicClientId; /** * Connection idle timeout duration that how long the client expects Service Bus to keep a connection alive when no messages delivered. * @see <a href="http: * @see <a href="https: */ private Duration idleTimeout = Duration.ofMinutes(2); /** * Pricing tier for a Service Bus namespace. 
*/ private String pricingTier; private final Listener listener = new Listener(); private final PrefetchPolicy prefetchPolicy = new PrefetchPolicy(); @NestedConfigurationProperty private final JmsPoolConnectionFactoryProperties pool = new JmsPoolConnectionFactoryProperties(); public boolean isEnabled() { return enabled; } public void setEnabled(boolean enabled) { this.enabled = enabled; } public JmsPoolConnectionFactoryProperties getPool() { return pool; } public String getConnectionString() { return connectionString; } public void setConnectionString(String connectionString) { this.connectionString = connectionString; } public String getTopicClientId() { return topicClientId; } public void setTopicClientId(String topicClientId) { this.topicClientId = topicClientId; } public String getPricingTier() { return this.pricingTier; } public void setPricingTier(String pricingTier) { this.pricingTier = pricingTier; } public Duration getIdleTimeout() { return idleTimeout; } public void setIdleTimeout(Duration idleTimeout) { this.idleTimeout = idleTimeout; } public Listener getListener() { return listener; } public PrefetchPolicy getPrefetchPolicy() { return prefetchPolicy; } public String getNamespace() { return namespace; } public void setNamespace(String namespace) { this.namespace = namespace; } @Override public String getScopes() { return this.scopes == null ? 
getDefaultScopes() : this.scopes; } public void setScopes(String scopes) { this.scopes = scopes; } @Override public boolean isPasswordlessEnabled() { return passwordlessEnabled; } public void setPasswordlessEnabled(boolean passwordlessEnabled) { this.passwordlessEnabled = passwordlessEnabled; } @Override public AzureProfileConfigurationProperties getProfile() { return profile; } public void setProfile(AzureProfileConfigurationProperties profile) { this.profile = profile; } @Override public TokenCredentialConfigurationProperties getCredential() { return credential; } public void setCredential(TokenCredentialConfigurationProperties credential) { this.credential = credential; } @Override public static class PrefetchPolicy { /** * Fallback value for prefetch option in this Service Bus namespace. */ private int all = 0; /** * The number of prefetch for durable topic. */ private int durableTopicPrefetch = 0; /** * The number of prefetch for queue browser. */ private int queueBrowserPrefetch = 0; /** * The number of prefetch for queue. */ private int queuePrefetch = 0; /** * The number of prefetch for topic. */ private int topicPrefetch = 0; public int getAll() { return Math.max(all, 0); } public void setAll(int all) { this.all = all; } public int getDurableTopicPrefetch() { return durableTopicPrefetch > 0 ? durableTopicPrefetch : getAll(); } public void setDurableTopicPrefetch(int durableTopicPrefetch) { this.durableTopicPrefetch = durableTopicPrefetch; } public int getQueueBrowserPrefetch() { return queueBrowserPrefetch > 0 ? queueBrowserPrefetch : getAll(); } public void setQueueBrowserPrefetch(int queueBrowserPrefetch) { this.queueBrowserPrefetch = queueBrowserPrefetch; } public int getQueuePrefetch() { return queuePrefetch > 0 ? queuePrefetch : getAll(); } public void setQueuePrefetch(int queuePrefetch) { this.queuePrefetch = queuePrefetch; } public int getTopicPrefetch() { return topicPrefetch > 0 ? 
topicPrefetch : getAll(); } public void setTopicPrefetch(int topicPrefetch) { this.topicPrefetch = topicPrefetch; } } public static class Listener { /** * Whether the reply destination type is topic. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean replyPubSubDomain; /** * The QosSettings to use when sending a reply. */ private QosSettings replyQosSettings; /** * Whether to make the subscription durable. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionDurable = Boolean.TRUE; /** * Whether to make the subscription shared. Only works for the bean of topicJmsListenerContainerFactory. */ private Boolean subscriptionShared; /** * The phase in which this container should be started and stopped. */ private Integer phase; public Boolean isReplyPubSubDomain() { return replyPubSubDomain; } public void setReplyPubSubDomain(Boolean replyPubSubDomain) { this.replyPubSubDomain = replyPubSubDomain; } public QosSettings getReplyQosSettings() { return replyQosSettings; } public void setReplyQosSettings(QosSettings replyQosSettings) { this.replyQosSettings = replyQosSettings; } public Boolean isSubscriptionDurable() { return subscriptionDurable; } public void setSubscriptionDurable(Boolean subscriptionDurable) { this.subscriptionDurable = subscriptionDurable; } public Boolean isSubscriptionShared() { return subscriptionShared; } public void setSubscriptionShared(Boolean subscriptionShared) { this.subscriptionShared = subscriptionShared; } public Integer getPhase() { return phase; } public void setPhase(Integer phase) { this.phase = phase; } } private String getDefaultScopes() { return SERVICEBUS_SCOPE_MAP.getOrDefault(getProfile().getCloudType(), SERVICE_BUS_SCOPE_AZURE); } }
Does previous api-version fail this test too?
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
&& image != KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO)
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
If these images are no longer supported by Compute, you need to add proper Javadoc in the code, e.g. https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/resourcemanager/azure-resourcemanager-compute/src/main/java/com/azure/resourcemanager/compute/models/KnownLinuxVirtualMachineImage.java#L22-L27, and check whether there is a replacement.
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
&& image != KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO)
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
Yes, test case `VirtualMachinePopularImageTests#canCreateAllPopularImageVM` fails too in `LIVE` and `RECORD` modes at previous api-version.
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
&& image != KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO)
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
See Weidong's comment above: https://github.com/Azure/azure-sdk-for-java/pull/38193/files#r1441226853
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
&& image != KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO)
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
The `@Deprecated` Javadoc comment has been fixed in the new version.
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
&& image != KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO)
/**
 * Verifies that a VM can be created from every popular Windows and Linux marketplace image.
 * Images known to be unavailable are skipped; all creations run concurrently.
 */
public void canCreateAllPopularImageVM() {
    // Live-only test; nothing to do in playback mode.
    if (skipInPlayback()) {
        return;
    }
    rgName = generateRandomResourceName("rg", 10);
    List<Mono<VirtualMachine>> creations = new ArrayList<>();
    for (KnownWindowsVirtualMachineImage windowsImage : KnownWindowsVirtualMachineImage.values()) {
        // Skip Windows images that can no longer be provisioned.
        if (windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS_GEN2
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_SERVER_2019_DATACENTER_WITH_CONTAINERS
            || windowsImage == KnownWindowsVirtualMachineImage.WINDOWS_DESKTOP_10_20H1_PRO) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularWindowsImage(windowsImage)
            .withAdminUsername("testUser")
            .withAdminPassword(password())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    for (KnownLinuxVirtualMachineImage linuxImage : KnownLinuxVirtualMachineImage.values()) {
        // Skip Linux images that can no longer be provisioned.
        if (linuxImage == KnownLinuxVirtualMachineImage.OPENSUSE_LEAP_15_1
            || linuxImage == KnownLinuxVirtualMachineImage.SLES_15_SP1) {
            continue;
        }
        creations.add(computeManager.virtualMachines()
            .define(generateRandomResourceName("vm", 10))
            .withRegion(Region.US_SOUTH_CENTRAL)
            .withNewResourceGroup(rgName)
            .withNewPrimaryNetwork("10.0.0.0/24")
            .withPrimaryPrivateIPAddressDynamic()
            .withoutPrimaryPublicIPAddress()
            .withPopularLinuxImage(linuxImage)
            .withRootUsername("testUser")
            .withSsh(sshPublicKey())
            .withSize(VirtualMachineSizeTypes.fromString("Standard_D2a_v4"))
            .createAsync());
    }
    // Provision all VMs concurrently and block until the last completes.
    Flux.merge(creations).blockLast();
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
// Test suite that creates VMs from each popular marketplace image.
// NOTE(review): the @Test/@DoNotRecord annotations here appear to belong to a
// test method elided from this excerpt, not to cleanUpResources() — confirm
// against the full file.
class VirtualMachinePopularImageTests extends ComputeManagementTest {
    // Resource group created by the test; deleted during cleanup.
    private String rgName = "";
    @Test
    @DoNotRecord(skipInPlayback = true)
    @Override
    protected void cleanUpResources() {
        // Begin (non-blocking) deletion of the test resource group.
        resourceManager.resourceGroups().beginDeleteByName(rgName);
    }
}
Nothing should throw a NullPointerException here
/**
 * Reads the Content-Length header and parses it as a {@code long}.
 * Returns 0 when the header is absent, empty, or not a valid number;
 * an unparseable value is logged as a warning.
 *
 * @param logger logger used to report parse failures.
 * @param headers response headers to read from.
 * @return the parsed content length, or 0 when unavailable.
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String rawLength = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(rawLength)) {
        return 0;
    }
    try {
        return Long.parseLong(rawLength);
    } catch (NumberFormatException e) {
        logger.warning("Could not parse the HTTP header content-length: '{}'.", rawLength, e);
        return 0;
    }
}
} catch (NumberFormatException e) {
/**
 * Reads the Content-Length header and parses it as a {@code long}.
 * Returns 0 when the header is absent, empty, or not a valid number;
 * an unparseable value is logged at informational level via a lazy supplier.
 *
 * @param logger logger used to report parse failures.
 * @param headers response headers to read from.
 * @return the parsed content length, or 0 when unavailable.
 */
private static long getContentLength(ClientLogger logger, HttpHeaders headers) {
    String rawLength = headers.getValue(HttpHeaderName.CONTENT_LENGTH);
    if (CoreUtils.isNullOrEmpty(rawLength)) {
        return 0;
    }
    try {
        return Long.parseLong(rawLength);
    } catch (NumberFormatException e) {
        // Supplier defers message construction until the level is enabled.
        logger.log(LogLevel.INFORMATIONAL,
            () -> "Could not parse the HTTP header content-length: '" + rawLength + "'.", e);
        return 0;
    }
}
// Default HttpResponseLogger: logs status/URL/duration, headers, and
// optionally the body, subject to the configured HttpLogDetailLevel.
// NOTE(review): fields such as httpLogDetailLevel, allowedHeaderNames,
// allowedQueryParameterNames and prettyPrintBody are declared outside this
// excerpt — confirm their semantics against the full file.
class DefaultHttpResponseLogger implements HttpResponseLogger {

    /** Logs the response asynchronously; may defer body logging by wrapping the response. */
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        // Skip all work when the logger would drop the message anyway.
        if (!logger.canLogAtLevel(logLevel)) {
            return Mono.just(response);
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Wrap the response so the body is logged as it is consumed.
                return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody));
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return Mono.just(response);
    }

    // Adds response headers to the log message when header logging is enabled.
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
        }
    }

    // Adds status code, redacted URL and duration when URL logging is enabled.
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY,
                    getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }

    // Adds the raw Content-Length header value when present.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }

    /** Synchronous twin of the asynchronous logResponse above. */
    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                return new LoggingHttpResponse(response, logBuilder, logger, (int) contentLength,
                    contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }
}
// Default HttpResponseLogger: logs status/URL/duration, headers, and
// optionally the body, subject to the configured HttpLogDetailLevel.
// NOTE(review): fields such as httpLogDetailLevel, allowedHeaderNames,
// allowedQueryParameterNames and prettyPrintBody are declared outside this
// excerpt — confirm their semantics against the full file.
class DefaultHttpResponseLogger implements HttpResponseLogger {

    /** Logs the response asynchronously; may defer body logging by wrapping the response. */
    @Override
    public Mono<HttpResponse> logResponse(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        // Skip all work when the logger would drop the message anyway.
        if (!logger.canLogAtLevel(logLevel)) {
            return Mono.just(response);
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                // Wrap the response so the body is logged as it is consumed.
                return Mono.just(new LoggingHttpResponse(response, logBuilder, logger,
                    (int) contentLength, contentTypeHeader, prettyPrintBody));
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return Mono.just(response);
    }

    // Adds response headers to the log message when header logging is enabled.
    private void logHeaders(ClientLogger logger, HttpResponse response, LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogHeaders() && logger.canLogAtLevel(LogLevel.INFORMATIONAL)) {
            addHeadersToLogMessage(allowedHeaderNames, response.getHeaders(), logBuilder);
        }
    }

    // Adds status code, redacted URL and duration when URL logging is enabled.
    private void logUrl(HttpResponseLoggingContext loggingOptions, HttpResponse response,
        LoggingEventBuilder logBuilder) {
        if (httpLogDetailLevel.shouldLogUrl()) {
            logBuilder
                .addKeyValue(LoggingKeys.STATUS_CODE_KEY, response.getStatusCode())
                .addKeyValue(LoggingKeys.URL_KEY,
                    getRedactedUrl(response.getRequest().getUrl(), allowedQueryParameterNames))
                .addKeyValue(LoggingKeys.DURATION_MS_KEY, loggingOptions.getResponseDuration().toMillis());
        }
    }

    // Adds the raw Content-Length header value when present.
    private void logContentLength(HttpResponse response, LoggingEventBuilder logBuilder) {
        String contentLengthString = response.getHeaderValue(HttpHeaderName.CONTENT_LENGTH);
        if (!CoreUtils.isNullOrEmpty(contentLengthString)) {
            logBuilder.addKeyValue(LoggingKeys.CONTENT_LENGTH_KEY, contentLengthString);
        }
    }

    /** Synchronous twin of the asynchronous logResponse above. */
    @Override
    public HttpResponse logResponseSync(ClientLogger logger, HttpResponseLoggingContext loggingOptions) {
        final LogLevel logLevel = getLogLevel(loggingOptions);
        final HttpResponse response = loggingOptions.getHttpResponse();
        if (!logger.canLogAtLevel(logLevel)) {
            return response;
        }
        LoggingEventBuilder logBuilder = getLogBuilder(logLevel, logger);
        logContentLength(response, logBuilder);
        logUrl(loggingOptions, response, logBuilder);
        logHeaders(logger, response, logBuilder);
        if (httpLogDetailLevel.shouldLogBody()) {
            String contentTypeHeader = response.getHeaderValue(HttpHeaderName.CONTENT_TYPE);
            long contentLength = getContentLength(logger, response.getHeaders());
            if (shouldBodyBeLogged(contentTypeHeader, contentLength)) {
                return new LoggingHttpResponse(response, logBuilder, logger, (int) contentLength,
                    contentTypeHeader, prettyPrintBody);
            }
        }
        logBuilder.log(RESPONSE_LOG_MESSAGE);
        return response;
    }
}
Nothing should throw a NullPointerException here
/**
 * Parses a semantic version string of the form
 * {@code major.minor.patch[-prerelease][+build]}.
 *
 * @param version version string to parse; must not be null.
 * @return the parsed {@link SemanticVersion}, or an invalid placeholder
 *     when the string does not follow the expected shape.
 */
public static SemanticVersion parse(String version) {
    Objects.requireNonNull(version, "'version' cannot be null.");

    // Locate the two dots separating major.minor.patch.
    int firstDot = version.indexOf('.');
    if (firstDot < 0) {
        return createInvalid(version);
    }
    int secondDot = version.indexOf('.', firstDot + 1);
    if (secondDot < 0) {
        return createInvalid(version);
    }

    // The patch number ends at the first '.', '-' or '+' after the second dot.
    int patchEnd = secondDot + 1;
    for (; patchEnd < version.length(); patchEnd++) {
        char c = version.charAt(patchEnd);
        if (c == '.' || c == '-' || c == '+') {
            break;
        }
    }

    // Build metadata ('+...') is excluded from the prerelease portion.
    int prereleaseEnd = version.indexOf('+', patchEnd);
    if (prereleaseEnd < 0) {
        prereleaseEnd = version.length();
    }

    try {
        int major = Integer.parseInt(version.substring(0, firstDot));
        int minor = Integer.parseInt(version.substring(firstDot + 1, secondDot));
        int patch = Integer.parseInt(version.substring(secondDot + 1, patchEnd));
        String prerelease = (patchEnd == prereleaseEnd) ? "" : version.substring(patchEnd + 1, prereleaseEnd);
        return new SemanticVersion(major, minor, patch, prerelease, version);
    } catch (NumberFormatException ignored) {
        // Non-numeric components make the whole version invalid.
        return createInvalid(version);
    }
}
} catch (NumberFormatException ignored) {
/**
 * Parses a semantic version string of the form
 * {@code major.minor.patch[-prerelease][+build]}.
 *
 * @param version version string to parse; must not be null.
 * @return the parsed {@link SemanticVersion}, or an invalid placeholder
 *     when the string does not follow the expected shape.
 */
public static SemanticVersion parse(String version) {
    Objects.requireNonNull(version, "'version' cannot be null.");

    // Locate the two dots separating major.minor.patch.
    int firstDot = version.indexOf('.');
    if (firstDot < 0) {
        return createInvalid(version);
    }
    int secondDot = version.indexOf('.', firstDot + 1);
    if (secondDot < 0) {
        return createInvalid(version);
    }

    // The patch number ends at the first '.', '-' or '+' after the second dot.
    int patchEnd = secondDot + 1;
    for (; patchEnd < version.length(); patchEnd++) {
        char c = version.charAt(patchEnd);
        if (c == '.' || c == '-' || c == '+') {
            break;
        }
    }

    // Build metadata ('+...') is excluded from the prerelease portion.
    int prereleaseEnd = version.indexOf('+', patchEnd);
    if (prereleaseEnd < 0) {
        prereleaseEnd = version.length();
    }

    try {
        int major = Integer.parseInt(version.substring(0, firstDot));
        int minor = Integer.parseInt(version.substring(firstDot + 1, secondDot));
        int patch = Integer.parseInt(version.substring(secondDot + 1, patchEnd));
        String prerelease = (patchEnd == prereleaseEnd) ? "" : version.substring(patchEnd + 1, prereleaseEnd);
        return new SemanticVersion(major, minor, patch, prerelease, version);
    } catch (NumberFormatException ignored) {
        // Non-numeric components make the whole version invalid.
        return createInvalid(version);
    }
}
class name to get package version of. * @return parsed {@link SemanticVersion}
class name to get package version of. * @return parsed {@link SemanticVersion}
`String.valueOf` returns `"null"` when the value is null. The old code had a redundant check
/**
 * Serializes a value to its {@code String} representation for use in a request.
 *
 * @param serializer the serializer used for values that are not handled by a built-in conversion
 * @param value the value to serialize; may be null
 * @return the serialized value, or null when {@code value} is null
 */
private static String serialize(SerializerAdapter serializer, Object value) {
    // Nothing to serialize.
    if (value == null) {
        return null;
    }

    // Strings pass through unchanged.
    if (value instanceof String) {
        return (String) value;
    }

    // Simple scalar-like types serialize via String.valueOf.
    boolean isScalar = value.getClass().isPrimitive()
        || value instanceof Number
        || value instanceof Boolean
        || value instanceof Character
        || value instanceof DateTimeRfc1123;
    if (isScalar) {
        return String.valueOf(value);
    }

    // Date-times use the ISO-8601 instant representation.
    if (value instanceof OffsetDateTime) {
        return ((OffsetDateTime) value).format(DateTimeFormatter.ISO_INSTANT);
    }

    // Enum-like types also serialize via String.valueOf.
    if (value instanceof ExpandableStringEnum<?> || value.getClass().isEnum()) {
        return String.valueOf(value);
    }

    // Everything else goes through the serializer.
    return serializer.serializeRaw(value);
}
return String.valueOf(value);
/**
 * Serializes a value to its {@code String} representation for use in a request.
 *
 * @param serializer the serializer used for values that are not handled by a built-in conversion
 * @param value the value to serialize; may be null
 * @return the serialized value, or null when {@code value} is null
 */
private static String serialize(SerializerAdapter serializer, Object value) {
    // Nothing to serialize.
    if (value == null) {
        return null;
    }

    // Strings pass through unchanged.
    if (value instanceof String) {
        return (String) value;
    }

    // Simple scalar-like types serialize via String.valueOf.
    boolean isScalar = value.getClass().isPrimitive()
        || value instanceof Number
        || value instanceof Boolean
        || value instanceof Character
        || value instanceof DateTimeRfc1123;
    if (isScalar) {
        return String.valueOf(value);
    }

    // Date-times use the ISO-8601 instant representation.
    if (value instanceof OffsetDateTime) {
        return ((OffsetDateTime) value).format(DateTimeFormatter.ISO_INSTANT);
    }

    // Enum-like types use toString(), falling back to the literal "null" if toString() returns null.
    if (value instanceof ExpandableStringEnum<?> || value.getClass().isEnum()) {
        String stringValue = value.toString();
        return (stringValue != null) ? stringValue : "null";
    }

    // Everything else goes through the serializer.
    return serializer.serializeRaw(value);
}
class && requestOptionsPosition == -1) { requestOptionsPosition = i; }
class && requestOptionsPosition == -1) { requestOptionsPosition = i; }
[non-blocking] we could go even further and not buffer the output stream (like https://github.com/Azure/azure-sdk-for-java/pull/38206/files#diff-37fd5025949a7a762faf4a100376503f75a80fe98f5cd5838e13cdc28e9c0886) — the caller is responsible for closing it anyway. Also, this is where a closeable BinaryData would come in really handy.
/**
 * Receives the response from the remote server and buffers its body fully in memory.
 *
 * <p>Fix: the original called {@code connection.getHeaderFields()} twice — once to size the
 * {@code Headers} map and again to iterate it; the captured map is now reused for both.</p>
 *
 * @param httpRequest The HTTP Request that was sent
 * @param connection The HttpURLConnection the response is read from; always disconnected on exit
 * @return A HttpResponse object with the status code, headers, and fully-buffered body
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();

        // Pre-size so the default load factor (0.75) never forces a resize while copying headers.
        Map<String, List<String>> hucHeaders = connection.getHeaderFields();
        Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
        for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
            // HttpURLConnection maps the status line to a null key; skip it.
            if (entry.getKey() != null) {
                responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
            }
        }

        // Prefer the error stream when present (non-2xx responses); otherwise read the input stream.
        AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
        try (InputStream errorStream = connection.getErrorStream();
            InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
            byte[] buffer = new byte[8192];
            int length;
            while ((length = inputStream.read(buffer)) != -1) {
                outputStream.write(buffer, 0, length);
            }
        }

        return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
            BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    } finally {
        connection.disconnect();
    }
}
BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
/**
 * Receives the response from the remote server and buffers its body fully in memory.
 *
 * <p>Fix: the original called {@code connection.getHeaderFields()} twice — once to size the
 * {@code Headers} map and again to iterate it; the captured map is now reused for both.</p>
 *
 * @param httpRequest The HTTP Request that was sent
 * @param connection The HttpURLConnection the response is read from; always disconnected on exit
 * @return A HttpResponse object with the status code, headers, and fully-buffered body
 */
private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) {
    try {
        int responseCode = connection.getResponseCode();

        // Pre-size so the default load factor (0.75) never forces a resize while copying headers.
        Map<String, List<String>> hucHeaders = connection.getHeaderFields();
        Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F));
        for (Map.Entry<String, List<String>> entry : hucHeaders.entrySet()) {
            // HttpURLConnection maps the status line to a null key; skip it.
            if (entry.getKey() != null) {
                responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue());
            }
        }

        // Prefer the error stream when present (non-2xx responses); otherwise read the input stream.
        AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream();
        try (InputStream errorStream = connection.getErrorStream();
            InputStream inputStream = (errorStream == null) ? connection.getInputStream() : errorStream) {
            byte[] buffer = new byte[8192];
            int length;
            while ((length = inputStream.read(buffer)) != -1) {
                outputStream.write(buffer, 0, length);
            }
        }

        return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders,
            BinaryData.fromByteBuffer(outputStream.toByteBuffer()));
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    } finally {
        connection.disconnect();
    }
}
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } private static final 
EnumSet<HttpMethod> METHODS_WITHOUT_BODY = EnumSet.of(HttpMethod.GET, HttpMethod.HEAD); private static final EnumSet<HttpMethod> METHODS_WITH_BODY = EnumSet.of(HttpMethod.OPTIONS, HttpMethod.TRACE, HttpMethod.CONNECT, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE); /** * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) { BinaryData body = httpRequest.getBody(); if (body != null) { HttpMethod method = httpRequest.getHttpMethod(); if (METHODS_WITHOUT_BODY.contains(method)) { return; } if (!METHODS_WITH_BODY.contains(method)) { throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method)); } connection.setDoOutput(true); if (body instanceof ByteArrayBinaryData || body instanceof ByteBufferBinaryData || body instanceof SerializableBinaryData || body instanceof StringBinaryData) { try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) { os.write(body.toBytes()); os.flush(); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } else { byte[] buffer = new byte[8192]; try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) { InputStream is = body.toStream(); int read; while ((read = is.read(buffer)) != -1) { os.write(buffer, 0, read); } os.flush(); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } } } /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static 
final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = 
response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; 
String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } /** * Synchronously 
sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) { BinaryData body = httpRequest.getBody(); if (body == null) { return; } HttpMethod method = httpRequest.getHttpMethod(); switch (httpRequest.getHttpMethod()) { case GET: case HEAD: return; case OPTIONS: case TRACE: case CONNECT: case POST: case PUT: case DELETE: connection.setDoOutput(true); try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) { body.writeTo(os); os.flush(); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } return; default: throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method)); } } /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return 
doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? 
null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new 
StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
[to consider] what do you think about having a `BinaryData.isBuffered` for this check? Otherwise we tend to forget to update all the places where it's done when we add a new content type
/**
 * Synchronously sends the content of an HttpRequest via an HttpURLConnection instance.
 *
 * <p>Fix: the stream returned by {@code body.toStream()} was never closed; it is now managed
 * by the try-with-resources block alongside the output stream.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param progressReporter A reporter for the progress of the request (currently unused)
 * @param connection The HttpURLConnection the request body is written to
 */
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }

    HttpMethod method = httpRequest.getHttpMethod();
    // GET and HEAD never carry a request body.
    if (METHODS_WITHOUT_BODY.contains(method)) {
        return;
    }
    if (!METHODS_WITH_BODY.contains(method)) {
        throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }

    connection.setDoOutput(true);

    if (body instanceof ByteArrayBinaryData
        || body instanceof ByteBufferBinaryData
        || body instanceof SerializableBinaryData
        || body instanceof StringBinaryData) {
        // These content types expose their bytes directly; write them in a single call.
        // NOTE(review): presumably these are the fully in-memory BinaryData kinds — confirm
        // when new content types are added (see review suggestion for BinaryData.isBuffered).
        try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
            os.write(body.toBytes());
            os.flush();
        } catch (IOException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
    } else {
        // Stream-backed content: copy in 8 KiB chunks. The source stream is closed here too,
        // which the original code failed to do.
        try (DataOutputStream os = new DataOutputStream(connection.getOutputStream());
            InputStream is = body.toStream()) {
            byte[] buffer = new byte[8192];
            int read;
            while ((read = is.read(buffer)) != -1) {
                os.write(buffer, 0, read);
            }
            os.flush();
        } catch (IOException e) {
            throw LOGGER.logThrowableAsError(new RuntimeException(e));
        }
    }
}
|| body instanceof SerializableBinaryData || body instanceof StringBinaryData) {
/**
 * Synchronously sends the content of an HttpRequest via an HttpURLConnection instance.
 *
 * @param httpRequest The HTTP Request being sent
 * @param progressReporter A reporter for the progress of the request (currently unused)
 * @param connection The HttpURLConnection the request body is written to
 */
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    // No body, nothing to write.
    if (body == null) {
        return;
    }

    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            // These methods never carry a request body.
            return;

        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new RuntimeException(e));
            }
            return;

        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } private static final 
EnumSet<HttpMethod> METHODS_WITHOUT_BODY = EnumSet.of(HttpMethod.GET, HttpMethod.HEAD); private static final EnumSet<HttpMethod> METHODS_WITH_BODY = EnumSet.of(HttpMethod.OPTIONS, HttpMethod.TRACE, HttpMethod.CONNECT, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE); /** * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } /** * Synchronously 
sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
nit: I think it's more readable, but probably nothing can beat the perf of a good old switch, and maybe we can get the best of both worlds with a `boolean canHaveBody(HttpMethod)` helper?
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>No-op when the request has no body or the method (GET/HEAD) does not carry one;
 * throws for HTTP methods this client does not recognize.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param progressReporter A reporter for the progress of the request (currently unused)
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    if (METHODS_WITHOUT_BODY.contains(method)) {
        return;
    }
    if (!METHODS_WITH_BODY.contains(method)) {
        throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
    connection.setDoOutput(true);
    // FIX: the in-memory/streaming split was duplicated behind an instanceof chain;
    // BinaryData.writeTo lets each implementation pick its own optimal write path.
    try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
        body.writeTo(os);
        os.flush();
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    }
}
if (METHODS_WITHOUT_BODY.contains(method)) {
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>Returns without writing when the request carries no body or the method is
 * GET/HEAD; rejects methods outside the known set.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param progressReporter A reporter for the progress of the request (currently unused)
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }

    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            // These methods never carry a body; silently drop it.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            // Fall through to the shared write path below.
            break;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }

    connection.setDoOutput(true);
    try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
        body.writeTo(os);
        os.flush();
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    }
}
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } private static final 
EnumSet<HttpMethod> METHODS_WITHOUT_BODY = EnumSet.of(HttpMethod.GET, HttpMethod.HEAD); private static final EnumSet<HttpMethod> METHODS_WITH_BODY = EnumSet.of(HttpMethod.OPTIONS, HttpMethod.TRACE, HttpMethod.CONNECT, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE); /** * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } /** * Synchronously 
sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
Or actually, something like `BinaryData.copyTo(OutputStream)` could do the optimization inside the implementation.
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>No-op when the request has no body or the method (GET/HEAD) does not carry one;
 * throws for HTTP methods this client does not recognize.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param progressReporter A reporter for the progress of the request (currently unused)
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }
    HttpMethod method = httpRequest.getHttpMethod();
    if (METHODS_WITHOUT_BODY.contains(method)) {
        return;
    }
    if (!METHODS_WITH_BODY.contains(method)) {
        throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
    connection.setDoOutput(true);
    // FIX: the in-memory/streaming split was duplicated behind an instanceof chain;
    // BinaryData.writeTo lets each implementation pick its own optimal write path.
    try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
        body.writeTo(os);
        os.flush();
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    }
}
|| body instanceof SerializableBinaryData || body instanceof StringBinaryData) {
/**
 * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance.
 *
 * <p>Returns without writing when the request carries no body or the method is
 * GET/HEAD; rejects methods outside the known set.</p>
 *
 * @param httpRequest The HTTP Request being sent
 * @param progressReporter A reporter for the progress of the request (currently unused)
 * @param connection The HttpURLConnection that is being sent to
 */
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }

    HttpMethod method = httpRequest.getHttpMethod();
    switch (method) {
        case GET:
        case HEAD:
            // These methods never carry a body; silently drop it.
            return;
        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            // Fall through to the shared write path below.
            break;
        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }

    connection.setDoOutput(true);
    try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
        body.writeTo(os);
        os.flush();
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    }
}
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } private static final 
EnumSet<HttpMethod> METHODS_WITHOUT_BODY = EnumSet.of(HttpMethod.GET, HttpMethod.HEAD); private static final EnumSet<HttpMethod> METHODS_WITH_BODY = EnumSet.of(HttpMethod.OPTIONS, HttpMethod.TRACE, HttpMethod.CONNECT, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE); /** * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } /** * Synchronously 
sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
Let's look at adding both. Possibly add on `BinaryData.copyTo(Channel)` as well. Though that API name may be a bit confusing as does `copyTo` buffer if the `BinaryData` isn't buffered?
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) { BinaryData body = httpRequest.getBody(); if (body != null) { HttpMethod method = httpRequest.getHttpMethod(); if (METHODS_WITHOUT_BODY.contains(method)) { return; } if (!METHODS_WITH_BODY.contains(method)) { throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method)); } connection.setDoOutput(true); if (body instanceof ByteArrayBinaryData || body instanceof ByteBufferBinaryData || body instanceof SerializableBinaryData || body instanceof StringBinaryData) { try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) { os.write(body.toBytes()); os.flush(); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } else { byte[] buffer = new byte[8192]; try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) { InputStream is = body.toStream(); int read; while ((read = is.read(buffer)) != -1) { os.write(buffer, 0, read); } os.flush(); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } } }
|| body instanceof SerializableBinaryData || body instanceof StringBinaryData) {
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) { BinaryData body = httpRequest.getBody(); if (body == null) { return; } HttpMethod method = httpRequest.getHttpMethod(); switch (httpRequest.getHttpMethod()) { case GET: case HEAD: return; case OPTIONS: case TRACE: case CONNECT: case POST: case PUT: case DELETE: connection.setDoOutput(true); try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) { body.writeTo(os); os.flush(); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } return; default: throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method)); } }
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } private static final 
EnumSet<HttpMethod> METHODS_WITHOUT_BODY = EnumSet.of(HttpMethod.GET, HttpMethod.HEAD); private static final EnumSet<HttpMethod> METHODS_WITH_BODY = EnumSet.of(HttpMethod.OPTIONS, HttpMethod.TRACE, HttpMethod.CONNECT, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE); /** * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } /** * Synchronously 
sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
Yeah, this was me being a bit lazy here — I did it to make this more readable and easier to work with. I will change this back to either a `switch` or an `if` check.
/**
 * Synchronously writes the request body of an HttpRequest to an HttpURLConnection.
 *
 * <p>Methods that must not carry a body (GET/HEAD) are silently skipped; unknown methods fail fast.
 * In-memory payload types are written in one shot, everything else is streamed in 8 KB chunks.</p>
 *
 * @param httpRequest The HTTP request being sent.
 * @param progressReporter A reporter for the progress of the request (currently unused).
 * @param connection The HttpURLConnection that is being written to.
 */
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }

    HttpMethod method = httpRequest.getHttpMethod();
    if (METHODS_WITHOUT_BODY.contains(method)) {
        // GET/HEAD requests must not carry a request body; drop it silently.
        return;
    }
    if (!METHODS_WITH_BODY.contains(method)) {
        throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }

    connection.setDoOutput(true);
    // Single write path: one try-with-resources instead of two duplicated try/catch blocks.
    try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
        if (body instanceof ByteArrayBinaryData || body instanceof ByteBufferBinaryData
            || body instanceof SerializableBinaryData || body instanceof StringBinaryData) {
            // Payload is already fully in memory; write it in one call.
            os.write(body.toBytes());
        } else {
            // Unknown/streaming payload; copy through a fixed-size buffer.
            byte[] buffer = new byte[8192];
            InputStream is = body.toStream();
            int read;
            while ((read = is.read(buffer)) != -1) {
                os.write(buffer, 0, read);
            }
        }
        os.flush();
    } catch (IOException e) {
        throw LOGGER.logThrowableAsError(new RuntimeException(e));
    }
}
if (METHODS_WITHOUT_BODY.contains(method)) {
/**
 * Synchronously writes the request body of an HttpRequest to an HttpURLConnection.
 *
 * <p>Methods that must not carry a body (GET/HEAD) are silently skipped; unknown methods fail fast.</p>
 *
 * @param httpRequest The HTTP request being sent.
 * @param progressReporter A reporter for the progress of the request (currently unused).
 * @param connection The HttpURLConnection that is being written to.
 */
private void sendBody(HttpRequest httpRequest, Object progressReporter, HttpURLConnection connection) {
    BinaryData body = httpRequest.getBody();
    if (body == null) {
        return;
    }

    HttpMethod method = httpRequest.getHttpMethod();
    // Switch on the already-captured method instead of calling getHttpMethod() a second time.
    switch (method) {
        case GET:
        case HEAD:
            // These methods must not carry a request body; drop it silently.
            return;

        case OPTIONS:
        case TRACE:
        case CONNECT:
        case POST:
        case PUT:
        case DELETE:
            connection.setDoOutput(true);
            try (DataOutputStream os = new DataOutputStream(connection.getOutputStream())) {
                body.writeTo(os);
                os.flush();
            } catch (IOException e) {
                throw LOGGER.logThrowableAsError(new RuntimeException(e));
            }
            return;

        default:
            throw LOGGER.logThrowableAsError(new IllegalStateException("Unknown HTTP Method: " + method));
    }
}
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } private static final 
EnumSet<HttpMethod> METHODS_WITHOUT_BODY = EnumSet.of(HttpMethod.GET, HttpMethod.HEAD); private static final EnumSet<HttpMethod> METHODS_WITH_BODY = EnumSet.of(HttpMethod.OPTIONS, HttpMethod.TRACE, HttpMethod.CONNECT, HttpMethod.POST, HttpMethod.PUT, HttpMethod.DELETE); /** * Synchronously sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
class DefaultHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(DefaultHttpClient.class); private final long connectionTimeout; private final long readTimeout; private final ProxyOptions proxyOptions; DefaultHttpClient(Duration connectionTimeout, Duration readTimeout, ProxyOptions proxyOptions) { this.connectionTimeout = connectionTimeout == null ? -1 : connectionTimeout.toMillis(); this.readTimeout = readTimeout == null ? -1 : readTimeout.toMillis(); this.proxyOptions = proxyOptions; } /** * Synchronously send the HttpRequest. * * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ @Override public HttpResponse send(HttpRequest httpRequest) { if (httpRequest.getHttpMethod() == HttpMethod.PATCH) { return sendPatchViaSocket(httpRequest); } HttpURLConnection connection = connect(httpRequest); sendBody(httpRequest, null, connection); return receiveResponse(httpRequest, connection); } /** * Synchronously sends a PATCH request via a socket client. 
* * @param httpRequest The HTTP request being sent * * @return The HttpResponse object */ private HttpResponse sendPatchViaSocket(HttpRequest httpRequest) { try { return SocketClient.sendPatchRequest(httpRequest); } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Open a connection based on the HttpRequest URL * * <p>If a proxy is specified, the authorization type will default to 'Basic' unless Digest authentication is * specified in the 'Authorization' header.</p> * * @param httpRequest The HTTP Request being sent * * @return The HttpURLConnection object */ private HttpURLConnection connect(HttpRequest httpRequest) { try { HttpURLConnection connection; URL url = httpRequest.getUrl(); if (proxyOptions != null) { InetSocketAddress address = proxyOptions.getAddress(); if (address != null) { Proxy proxy = new Proxy(Proxy.Type.HTTP, address); connection = (HttpURLConnection) url.openConnection(proxy); if (proxyOptions.getUsername() != null && proxyOptions.getPassword() != null) { String authString = proxyOptions.getUsername() + ":" + proxyOptions.getPassword(); String authStringEnc = Base64.getEncoder().encodeToString(authString.getBytes()); connection.setRequestProperty("Proxy-Authorization", "Basic " + authStringEnc); } } else { throw new ConnectException("Invalid proxy address"); } } else { connection = (HttpURLConnection) url.openConnection(); } if (connectionTimeout != -1) { connection.setConnectTimeout((int) connectionTimeout); } if (readTimeout != -1) { connection.setReadTimeout((int) readTimeout); } try { connection.setRequestMethod(httpRequest.getHttpMethod().toString()); } catch (ProtocolException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } for (Header header : httpRequest.getHeaders()) { for (String value : header.getValues()) { connection.addRequestProperty(header.getName(), value); } } return connection; } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } } /** * Synchronously 
sends the content of an HttpRequest via an HttpUrlConnection instance. * * @param httpRequest The HTTP Request being sent * @param progressReporter A reporter for the progress of the request * @param connection The HttpURLConnection that is being sent to */ /** * Receive the response from the remote server * * @param httpRequest The HTTP Request being sent * @param connection The HttpURLConnection being sent to * * @return A HttpResponse object */ private HttpResponse receiveResponse(HttpRequest httpRequest, HttpURLConnection connection) { try { int responseCode = connection.getResponseCode(); Map<String, List<String>> hucHeaders = connection.getHeaderFields(); Headers responseHeaders = new Headers((int) (hucHeaders.size() / 0.75F)); for (Map.Entry<String, List<String>> entry : connection.getHeaderFields().entrySet()) { if (entry.getKey() != null) { responseHeaders.add(HeaderName.fromString(entry.getKey()), entry.getValue()); } } AccessibleByteArrayOutputStream outputStream = new AccessibleByteArrayOutputStream(); try (InputStream errorStream = connection.getErrorStream(); InputStream inputStream = (errorStream == null) ? 
connection.getInputStream() : errorStream) { byte[] buffer = new byte[8192]; int length; while ((length = inputStream.read(buffer)) != -1) { outputStream.write(buffer, 0, length); } } return new DefaultHttpClientResponse(httpRequest, responseCode, responseHeaders, BinaryData.fromByteBuffer(outputStream.toByteBuffer())); } catch (IOException e) { throw LOGGER.logThrowableAsError(new RuntimeException(e)); } finally { connection.disconnect(); } } private static class SocketClient { private static final String HTTP_VERSION = " HTTP/1.1"; private static final SSLSocketFactory sslSocketFactory = (SSLSocketFactory) SSLSocketFactory.getDefault(); /** * Opens a socket connection, then writes the PATCH request across the * connection and reads the response * * @param httpRequest The HTTP Request being sent * @return an instance of HttpUrlConnectionResponse */ public static DefaultHttpClientResponse sendPatchRequest(HttpRequest httpRequest) throws IOException { final URL requestUrl = httpRequest.getUrl(); final String protocol = requestUrl.getProtocol(); final String host = requestUrl.getHost(); final int port = requestUrl.getPort(); switch (protocol) { case "https": { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(host, port)) { return doInputOutput(httpRequest, socket); } } case "http": { try (Socket socket = new Socket(host, port)) { return doInputOutput(httpRequest, socket); } } } throw new ProtocolException("Only HTTP and HTTPS are supported by this client."); } /** * Calls buildAndSend to send a String representation of the request across the output * stream, then calls buildResponse to get an instance of HttpUrlConnectionResponse * from the input stream * * @param httpRequest The HTTP Request being sent * @param socket An instance of the SocketClient * @return an instance of HttpUrlConnectionResponse */ @SuppressWarnings("deprecation") private static DefaultHttpClientResponse doInputOutput(HttpRequest httpRequest, Socket socket) throws IOException { 
httpRequest.setHeader(HeaderName.HOST, httpRequest.getUrl().getHost()); if (!"keep-alive".equalsIgnoreCase(httpRequest.getHeaders().getValue(HeaderName.CONNECTION))) { httpRequest.setHeader(HeaderName.CONNECTION, "close"); } try ( BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8)); OutputStreamWriter out = new OutputStreamWriter(socket.getOutputStream())) { buildAndSend(httpRequest, out); DefaultHttpClientResponse response = buildResponse(httpRequest, in); Header locationHeader = response.getHeaders().get(HeaderName.LOCATION); String redirectLocation = (locationHeader == null) ? null : locationHeader.getValue(); if (redirectLocation != null) { if (redirectLocation.startsWith("http")) { httpRequest.setUrl(redirectLocation); } else { httpRequest.setUrl(new URL(httpRequest.getUrl(), redirectLocation)); } return sendPatchRequest(httpRequest); } return response; } } /** * Converts an instance of HttpRequest to a String representation for sending * over the output stream * * @param httpRequest The HTTP Request being sent * @param out output stream for writing the request */ private static void buildAndSend(HttpRequest httpRequest, OutputStreamWriter out) throws IOException { final StringBuilder request = new StringBuilder(); request.append("PATCH") .append(" ") .append(httpRequest.getUrl().getPath()) .append(HTTP_VERSION) .append("\r\n"); if (httpRequest.getHeaders().getSize() > 0) { for (Header header : httpRequest.getHeaders()) { header.getValuesList().forEach(value -> request.append(header.getName()) .append(": ") .append(value) .append("\r\n")); } } if (httpRequest.getBody() != null) { request.append("\r\n") .append(httpRequest.getBody().toString()) .append("\r\n"); } out.write(request.toString()); out.flush(); } /** * Reads the response from the input stream and extracts the information * needed to construct an instance of HttpUrlConnectionResponse * * @param httpRequest The HTTP Request being sent * @param 
reader the input stream from the socket * @return an instance of HttpUrlConnectionResponse */ private static DefaultHttpClientResponse buildResponse(HttpRequest httpRequest, BufferedReader reader) throws IOException { String statusLine = reader.readLine(); int dotIndex = statusLine.indexOf('.'); int statusCode = Integer.parseInt(statusLine.substring(dotIndex+3, dotIndex+6)); Headers headers = new Headers(); String line; while ((line = reader.readLine()) != null && !line.isEmpty()) { String[] kv = line.split(": ", 2); String k = kv[0]; String v = kv[1]; headers.add(HeaderName.fromString(k), v); } StringBuilder bodyString = new StringBuilder(); while ((line = reader.readLine()) != null) { bodyString.append(line); } BinaryData body = BinaryData.fromByteBuffer(ByteBuffer.wrap(bodyString.toString().getBytes())); return new DefaultHttpClientResponse(httpRequest, statusCode, headers, body); } } }
Are these tests making real service calls? If not, should we use a virtual timer to speed them up?
public void waitUntilOperationWithTimeout() {
    // Activation completes immediately with a fixed value.
    final Response activated = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activation = ignored -> Mono.just(activated);

    // The first poll answers right away; every later poll stalls for five seconds,
    // far longer than the waitUntil timeout used below.
    final PollResponse<Response> firstPoll =
        new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10));
    final int[] polls = {-1};
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> poll = ignored -> {
        polls[0]++;
        return (polls[0] == 0)
            ? Mono.just(firstPoll)
            : Mono.delay(Duration.ofSeconds(5))
                .map(tick -> new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
    };

    SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activation.apply(cxt).block()),
        poll, (ignored1, ignored2) -> null, ignored -> null);

    // The timeout elapses before the slow second poll completes, so the last response
    // observed is still the first one — and no exception escapes waitUntil.
    PollResponse<Response> last = assertDoesNotThrow(
        () -> poller.waitUntil(Duration.ofMillis(1000), SUCCESSFULLY_COMPLETED));
    assertEquals("0", last.getValue().getResponse());
}
return Mono.delay(Duration.ofSeconds(5))
public void waitUntilOperationWithTimeout() {
    // Activation resolves instantly with a constant result.
    final Response activationResult = new Response("Activated");
    Function<PollingContext<Response>, Mono<Response>> activate = ctx -> Mono.just(activationResult);

    // Poll #0 returns immediately; any subsequent poll is delayed by five seconds,
    // which is well beyond the waitUntil timeout below.
    final PollResponse<Response> initialPoll =
        new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10));
    final int[] attempt = new int[] {-1};
    Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOnce = ctx -> {
        attempt[0]++;
        if (attempt[0] == 0) {
            return Mono.just(initialPoll);
        }
        return Mono.delay(Duration.ofSeconds(5))
            .map(unused -> new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));
    };

    SyncPoller<Response, CertificateOutput> syncPoller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
        context -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activate.apply(context).block()),
        pollOnce, (ignored1, ignored2) -> null, ignored -> null);

    // waitUntil times out before the slow poll completes; the most recent response is
    // still poll #0 and no exception is thrown.
    PollResponse<Response> observed = assertDoesNotThrow(
        () -> syncPoller.waitUntil(Duration.ofMillis(1000), SUCCESSFULLY_COMPLETED));
    assertEquals("0", observed.getValue().getResponse());
}
class PollerTests { private static final Duration STEPVERIFIER_TIMEOUT = Duration.ofSeconds(30); @Test public void asyncPollerConstructorPollIntervalZero() { assertThrows(IllegalArgumentException.class, () -> new PollerFlux<>(Duration.ZERO, ignored -> null, ignored -> null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void asyncPollerConstructorPollIntervalNegative() { assertThrows(IllegalArgumentException.class, () -> new PollerFlux<>(Duration.ofSeconds(-1), ignored -> null, ignored -> null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void asyncPollerConstructorPollIntervalNull() { assertThrows(NullPointerException.class, () -> new PollerFlux<>(null, ignored -> null, ignored -> null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void asyncPollerConstructorActivationOperationNull() { assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), null, ignored -> null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void asyncPollerConstructorPollOperationNull() { assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null, null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void asyncPollerConstructorCancelOperationNull() { assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null, ignored -> null, null, ignored -> null)); } @Test public void asyncPollerConstructorFetchResultOperationNull() { assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null, ignored -> null, (ignored1, ignored2) -> null, null)); } @Test public void subscribeToSpecificOtherOperationStatusTest() { final Duration retryAfter = Duration.ofMillis(10); PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter); PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter); 
PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_1", false), new Response("2"), retryAfter); PollResponse<Response> response3 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_2", false), new Response("3"), retryAfter); PollResponse<Response> response4 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("4"), retryAfter); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.empty(); int[] callCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { switch (callCount[0]++) { case 0: return Mono.just(response0); case 1: return Mono.just(response1); case 2: return Mono.just(response2); case 3: return Mono.just(response3); case 4: return Mono.just(response4); default: return Mono.error(new IllegalStateException("Too many requests")); } }; PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10), activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null); StepVerifier.create(pollerFlux) .expectSubscription() .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus()) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus()) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus()) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response3.getStatus()) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response4.getStatus()) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); } @Test public void noPollingForSynchronouslyCompletedActivationTest() { int[] activationCallCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse = ignored -> Mono.fromCallable(() -> { activationCallCount[0]++; return new 
PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("ActivationDone")); }); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> Mono.error(new RuntimeException("Polling shouldn't happen for synchronously completed activation.")); PollerFlux<Response, CertificateOutput> pollerFlux = create(Duration.ofMillis(10), activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null, ignored -> null); StepVerifier.create(pollerFlux) .expectSubscription() .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); assertEquals(1, activationCallCount[0]); } @Test public void noPollingForSynchronouslyCompletedActivationInSyncPollerTest() { int[] activationCallCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse = ignored -> Mono.fromCallable(() -> { activationCallCount[0]++; return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("ActivationDone")); }); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> Mono.error(new RuntimeException("Polling shouldn't happen for synchronously completed activation.")); SyncPoller<Response, CertificateOutput> syncPoller = create(Duration.ofMillis(10), activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null, ignored -> (Mono<CertificateOutput>) null) .getSyncPoller(); try { PollResponse<Response> response = syncPoller.waitForCompletion(Duration.ofSeconds(1)); assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus()); assertEquals(1, activationCallCount[0]); } catch (Exception e) { fail("SyncPoller did not complete on activation", e); } } @Test public void ensurePollingForInProgressActivationResponseTest() { final Duration retryAfter = Duration.ofMillis(10); int[] 
activationCallCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse = ignored -> Mono.fromCallable(() -> { activationCallCount[0]++; return new PollResponse<>(IN_PROGRESS, new Response("ActivationDone")); }); PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter); PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter); PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_1", false), new Response("2"), retryAfter); PollResponse<Response> response3 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("3"), retryAfter); int[] callCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { switch (callCount[0]++) { case 0: return Mono.just(response0); case 1: return Mono.just(response1); case 2: return Mono.just(response2); case 3: return Mono.just(response3); default: return Mono.error(new IllegalStateException("Too many requests")); } }; PollerFlux<Response, CertificateOutput> pollerFlux = create(Duration.ofMillis(10), activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null, ignored -> null); StepVerifier.create(pollerFlux) .expectSubscription() .assertNext(asyncPollResponse -> assertEquals(response0.getStatus(), asyncPollResponse.getStatus())) .assertNext(asyncPollResponse -> assertEquals(response1.getStatus(), asyncPollResponse.getStatus())) .assertNext(asyncPollResponse -> assertEquals(response2.getStatus(), asyncPollResponse.getStatus())) .assertNext(asyncPollResponse -> assertEquals(response3.getStatus(), asyncPollResponse.getStatus())) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); assertEquals(1, activationCallCount[0]); } @Test public void subscribeToActivationOnlyOnceTest() { final Duration retryAfter = Duration.ofMillis(10); PollResponse<Response> response0 = 
new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter); PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter); PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), retryAfter); int[] activationCallCount = new int[1]; Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.fromCallable(() -> { activationCallCount[0]++; return new Response("ActivationDone"); }); int[] pollCallCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { switch (pollCallCount[0]++) { case 0: return Mono.just(response0); case 1: return Mono.just(response1); case 2: return Mono.just(response2); default: return Mono.error(new IllegalStateException("Too many requests")); } }; PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10), activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null); StepVerifier.create(pollerFlux) .expectSubscription() .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus()) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus()) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus()) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); pollCallCount[0] = 0; StepVerifier.create(pollerFlux) .expectSubscription() .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus()) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus()) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus()) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); assertEquals(1, activationCallCount[0]); } @Test public void cancellationCanBeCalledFromOperatorChainTest() { final Duration retryAfter = 
Duration.ofMillis(10); PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter); PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter); PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), retryAfter); final Response activationResponse = new Response("Foo"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); int[] callCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { switch (callCount[0]++) { case 0: return Mono.just(response0); case 1: return Mono.just(response1); case 2: return Mono.just(response2); default: return Mono.error(new IllegalStateException("Too many requests")); } }; final List<Object> cancelParameters = new ArrayList<>(); BiFunction<PollingContext<Response>, PollResponse<Response>, Mono<Response>> cancelOperation = (pollingContext, pollResponse) -> { Collections.addAll(cancelParameters, pollingContext, pollResponse); return Mono.just(new Response("OperationCancelled")); }; PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10), activationOperation, pollOperation, cancelOperation, ignored -> null); AtomicReference<AsyncPollResponse<Response, CertificateOutput>> secondAsyncResponse = new AtomicReference<>(); Response cancelResponse = pollerFlux .take(2) .last() .flatMap((Function<AsyncPollResponse<Response, CertificateOutput>, Mono<Response>>) asyncPollResponse -> { secondAsyncResponse.set(asyncPollResponse); return asyncPollResponse.cancelOperation(); }).block(); Assertions.assertNotNull(cancelResponse); Assertions.assertTrue(cancelResponse.getResponse().equalsIgnoreCase("OperationCancelled")); Assertions.assertNotNull(secondAsyncResponse.get()); Assertions.assertEquals("1", secondAsyncResponse.get().getValue().getResponse()); 
assertEquals(2, cancelParameters.size()); assertEquals(activationResponse, ((PollingContext<?>) cancelParameters.get(0)).getActivationResponse() .getValue()); assertEquals(activationResponse, ((PollResponse<?>) cancelParameters.get(1)).getValue()); } @Test public void getResultCanBeCalledFromOperatorChainTest() { final Duration retryAfter = Duration.ofMillis(10); PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), retryAfter); final Response activationResponse = new Response("Foo"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); int[] callCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { switch (callCount[0]++) { case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter)); case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter)); case 2: return Mono.just(response2); default: return Mono.error(new IllegalStateException("Too many requests")); } }; final List<PollingContext<Response>> fetchResultParameters = new ArrayList<>(); Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation = pollingContext -> { fetchResultParameters.add(pollingContext); return Mono.just(new CertificateOutput("LROFinalResult")); }; PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10), activationOperation, pollOperation, (ignored1, ignored2) -> null, fetchResultOperation); AtomicReference<AsyncPollResponse<Response, CertificateOutput>> terminalAsyncResponse = new AtomicReference<>(); CertificateOutput lroResult = pollerFlux .takeUntil(apr -> apr.getStatus().isComplete()) .last() .flatMap((Function<AsyncPollResponse<Response, CertificateOutput>, Mono<CertificateOutput>>) asyncPollResponse -> { terminalAsyncResponse.set(asyncPollResponse); return 
asyncPollResponse.getFinalResult(); }).block(); Assertions.assertNotNull(lroResult); Assertions.assertTrue(lroResult.getName().equalsIgnoreCase("LROFinalResult")); Assertions.assertNotNull(terminalAsyncResponse.get()); Assertions.assertTrue(terminalAsyncResponse.get().getValue().getResponse().equalsIgnoreCase("2")); assertEquals(1, fetchResultParameters.size()); PollingContext<Response> pollingContext = fetchResultParameters.get(0); assertEquals(activationResponse, pollingContext.getActivationResponse().getValue()); assertEquals(response2, pollingContext.getLatestResponse()); } @Test public void verifyExceptionPropagationFromPollingOperation() { final Response activationResponse = new Response("Foo"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); final AtomicInteger cnt = new AtomicInteger(); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> { int count = cnt.incrementAndGet(); if (count <= 2) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"))); } else if (count == 3) { return Mono.error(new RuntimeException("Polling operation failed!")); } else if (count == 4) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("2"))); } else { return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3"))); } }; PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10), activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null); StepVerifier.create(pollerFlux) .expectSubscription() .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS) .expectErrorMessage("Polling operation failed!") .verify(STEPVERIFIER_TIMEOUT); } @Test public void verifyErrorFromPollingOperation() { final Response activationResponse = new Response("Foo"); 
Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); final AtomicInteger cnt = new AtomicInteger(); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> { int count = cnt.incrementAndGet(); if (count <= 2) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"))); } else if (count == 3) { return Mono.just(new PollResponse<>(FAILED, new Response("2"))); } else if (count == 4) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("3"))); } else { return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("4"))); } }; PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10), activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null); StepVerifier.create(pollerFlux) .expectSubscription() .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS) .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == FAILED) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); } @Test public void syncPollerConstructorPollIntervalZero() { assertThrows(IllegalArgumentException.class, () -> new SyncOverAsyncPoller<>(Duration.ZERO, cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void syncPollerConstructorPollIntervalNegative() { assertThrows(IllegalArgumentException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(-1), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void syncPollerConstructorPollIntervalNull() { assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(null, cxt -> new 
PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void syncConstructorActivationOperationNull() { assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1), null, ignored -> null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void syncPollerConstructorPollOperationNull() { assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), null, (ignored1, ignored2) -> null, ignored -> null)); } @Test public void syncPollerConstructorCancelOperationNull() { assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, null, ignored -> null)); } @Test public void syncPollerConstructorFetchResultOperationNull() { assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, (ignored1, ignored2) -> null, null)); } @Test public void syncPollerShouldCallActivationFromConstructor() { Boolean[] activationCalled = new Boolean[1]; activationCalled[0] = false; Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.fromCallable(() -> { activationCalled[0] = true; return new Response("ActivationDone"); }); SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), ignored -> null, (ignored1, ignored2) -> null, ignored -> null); Assertions.assertTrue(activationCalled[0]); } @Test public void eachPollShouldReceiveLastPollResponse() { Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored 
-> Mono.just(new Response("A")); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = pollingContext -> { Assertions.assertNotNull(pollingContext.getActivationResponse()); Assertions.assertNotNull(pollingContext.getLatestResponse()); PollResponse<Response> latestResponse = pollingContext.getLatestResponse(); Assertions.assertNotNull(latestResponse); return Mono.just(new PollResponse<>(IN_PROGRESS, new Response(latestResponse.getValue().toString() + "A"), Duration.ofMillis(10))); }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); PollResponse<Response> pollResponse = poller.poll(); Assertions.assertNotNull(pollResponse); Assertions.assertNotNull(pollResponse.getValue().getResponse()); Assertions.assertTrue(pollResponse.getValue() .getResponse() .equalsIgnoreCase("Response: AA")); pollResponse = poller.poll(); Assertions.assertNotNull(pollResponse); Assertions.assertNotNull(pollResponse.getValue().getResponse()); Assertions.assertTrue(pollResponse.getValue() .getResponse() .equalsIgnoreCase("Response: Response: AAA")); pollResponse = poller.poll(); Assertions.assertNotNull(pollResponse); Assertions.assertNotNull(pollResponse.getValue().getResponse()); Assertions.assertTrue(pollResponse.getValue() .getResponse() .equalsIgnoreCase("Response: Response: Response: AAAA")); } @Test public void waitForCompletionShouldReturnTerminalPollResponse() { PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), Duration.ofMillis(10)); final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); int[] pollCallCount = new int[1]; 
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { switch (pollCallCount[0]++) { case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10))); case 2: return Mono.just(response2); default: return Mono.error(new IllegalStateException("Too many requests")); } }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); PollResponse<Response> pollResponse = poller.waitForCompletion(); Assertions.assertNotNull(pollResponse.getValue()); assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse()); assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus()); } @Test public void getResultShouldPollUntilCompletionAndFetchResult() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); int[] invocationCount = new int[1]; invocationCount[0] = -1; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { invocationCount[0]++; switch (invocationCount[0]) { case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10))); case 2: return Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), Duration.ofMillis(10))); default: return Mono.error(new RuntimeException("Poll should not be called after terminal response")); } }; Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation = 
ignored -> Mono.just(new CertificateOutput("cert1")); SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, fetchResultOperation); CertificateOutput certificateOutput = poller.getFinalResult(); Assertions.assertNotNull(certificateOutput); assertEquals("cert1", certificateOutput.getName()); assertEquals(2, invocationCount[0]); } @Test public void getResultShouldNotPollOnCompletedPoller() { PollResponse<Response> response2 = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), Duration.ofMillis(10)); final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation = ignored -> Mono.just(new CertificateOutput("cert1")); int[] pollCallCount = new int[1]; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { switch (pollCallCount[0]++) { case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10))); case 2: return Mono.just(response2); default: return Mono.error(new IllegalStateException("Too many requests")); } }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, fetchResultOperation); PollResponse<Response> pollResponse = poller.waitForCompletion(); Assertions.assertNotNull(pollResponse.getValue()); assertEquals(response2.getValue().getResponse(), 
pollResponse.getValue().getResponse()); assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus()); CertificateOutput certificateOutput = poller.getFinalResult(); Assertions.assertNotNull(certificateOutput); assertEquals("cert1", certificateOutput.getName()); } @Test public void waitUntilShouldPollAfterMatchingStatus() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); LongRunningOperationStatus matchStatus = LongRunningOperationStatus.fromString("OTHER_1", false); int[] invocationCount = new int[1]; invocationCount[0] = -1; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { invocationCount[0]++; switch (invocationCount[0]) { case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10))); case 2: return Mono.just(new PollResponse<>(matchStatus, new Response("1"), Duration.ofMillis(10))); default: return Mono.error(new RuntimeException("Poll should not be called after matching response")); } }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); PollResponse<Response> pollResponse = poller.waitUntil(matchStatus); assertEquals(matchStatus, pollResponse.getStatus()); assertEquals(2, invocationCount[0]); } @Test public void verifyExceptionPropagationFromPollingOperationSyncPoller() { final Response activationResponse = new Response("Foo"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); final AtomicInteger cnt = new AtomicInteger(); 
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> { int count = cnt.incrementAndGet(); if (count <= 2) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"))); } else if (count == 3) { return Mono.error(new RuntimeException("Polling operation failed!")); } else if (count == 4) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("2"))); } else { return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3"))); } }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); RuntimeException exception = assertThrows(RuntimeException.class, poller::getFinalResult); assertEquals(exception.getMessage(), "Polling operation failed!"); } @Test public void testPollerFluxError() throws InterruptedException { IllegalArgumentException expectedException = new IllegalArgumentException(); PollerFlux<String, String> pollerFlux = error(expectedException); CountDownLatch countDownLatch = new CountDownLatch(1); pollerFlux.subscribe( response -> Assertions.fail("Did not expect a response"), ex -> { countDownLatch.countDown(); Assertions.assertSame(expectedException, ex); }, () -> Assertions.fail("Did not expect the flux to complete") ); boolean completed = countDownLatch.await(1, TimeUnit.SECONDS); Assertions.assertTrue(completed); } @Test public void testSyncPollerError() { PollerFlux<String, String> pollerFlux = error(new IllegalArgumentException()); Assertions.assertThrows(IllegalArgumentException.class, pollerFlux::getSyncPoller); } @Test public void testUpdatePollingIntervalWithoutVirtualTimer() { PollerFlux<String, String> pollerFlux = PollerFlux.create(Duration.ofMillis(10), context -> Mono.just(new PollResponse<>(IN_PROGRESS, "Activation")), context -> Mono.just(new 
PollResponse<>(IN_PROGRESS, "PollOperation")), (context, response) -> Mono.just("Cancel"), context -> Mono.just("FinalResult")); pollerFlux.setPollInterval(Duration.ofMillis(200)); StepVerifier.create(pollerFlux.take(5)) .thenAwait(Duration.ofSeconds(1)) .expectNextCount(5) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); } @Test public void testUpdatePollingInterval() { PollerFlux<String, String> pollerFlux = PollerFlux.create(Duration.ofMillis(10), context -> Mono.just(new PollResponse<>(IN_PROGRESS, "Activation")), context -> Mono.just(new PollResponse<>(IN_PROGRESS, "PollOperation")), (context, response) -> Mono.just("Cancel"), context -> Mono.just("FinalResult")); StepVerifier.create(pollerFlux.take(5)) .thenAwait(Duration.ofMillis(55)) .expectNextCount(5) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); pollerFlux.setPollInterval(Duration.ofMillis(50)); StepVerifier.create(pollerFlux.take(5)) .thenAwait(Duration.ofMillis(255)) .expectNextCount(5) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); pollerFlux.setPollInterval(Duration.ofMillis(195)); StepVerifier.create(pollerFlux.take(5)) .thenAwait(Duration.ofSeconds(1)) .expectNextCount(5) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); } /** * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer * than the timeout period. 
 */
    @Test
    public void waitForCompletionSinglePollTimesOut() {
        final Response activationResponse = new Response("Activated");

        // Activation completes immediately.
        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        // Every poll takes ~2s, far longer than the 100ms waitForCompletion timeout below.
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation
            = ignored -> Mono.delay(Duration.ofSeconds(2))
                .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        // Timeout surfaces as a RuntimeException whose cause is the TimeoutException.
        RuntimeException exception
            = assertThrows(RuntimeException.class, () -> poller.waitForCompletion(Duration.ofMillis(100)));
        assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
    }

    /**
     * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
     * doesn't complete within the timeout period.
     */
    @Test
    public void waitForCompletionOperationTimesOut() {
        final Response activationResponse = new Response("Activated");

        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        // The first poll responds quickly; every later poll takes ~2s, so the operation as a whole
        // can never reach a terminal state within the 100ms timeout.
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            invocationCount[0]++;
            if (invocationCount[0] == 0) {
                return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
            } else {
                return Mono.delay(Duration.ofSeconds(2))
                    .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
            }
        };

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        RuntimeException exception
            = assertThrows(RuntimeException.class, () -> poller.waitForCompletion(Duration.ofMillis(100)));
        assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
    }

    /**
     * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer
     * than the timeout period.
*/ @Test public void waitUntilSinglePollTimesOut() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> Mono.delay(Duration.ofSeconds(2)) .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED); assertEquals(activationResponse.getResponse(), pollResponse.getValue().getResponse()); } /** * Tests that the last received PollResponse is used when waitUtil times out. */ @Test /** * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer * than the timeout period. 
 */
    @Test
    public void getFinalResultSinglePollTimesOut() {
        final Response activationResponse = new Response("Activated");

        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        // A single poll takes ~2s, far longer than the 100ms getFinalResult timeout below.
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation
            = ignored -> Mono.delay(Duration.ofSeconds(2))
                .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        RuntimeException exception
            = assertThrows(RuntimeException.class, () -> poller.getFinalResult(Duration.ofMillis(100)));
        assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
    }

    /**
     * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation
     * doesn't complete within the timeout period.
     */
    @Test
    public void getFinalResultOperationTimesOut() {
        final Response activationResponse = new Response("Activated");

        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        // First poll responds quickly; each later poll takes ~2s, so polling never terminates in time.
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            invocationCount[0]++;
            if (invocationCount[0] == 0) {
                return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
            } else {
                return Mono.delay(Duration.ofSeconds(2))
                    .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));
            }
        };

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        RuntimeException exception
            = assertThrows(RuntimeException.class, () -> poller.getFinalResult(Duration.ofMillis(100)));
        assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception));
    }

    // Renders a Throwable's full stack trace into a String; used to enrich assertion failure messages.
    private static String printException(Throwable throwable) {
        StringWriter sw = new StringWriter();
        PrintWriter pw = new PrintWriter(sw);
        throwable.printStackTrace(pw);
        return sw.toString();
    }

    // Simple poll-result value type used throughout these tests.
    public static class Response {
        private final String response;

        public Response(String response) {
            this.response = response;
        }

        public String getResponse() {
            return response;
        }

        @Override
        public String toString() {
            return "Response: " + response;
        }
    }

    // Simple final-result value type used throughout these tests.
    public static class CertificateOutput {
        String name;

        public CertificateOutput(String certName) {
            name = certName;
        }

        public String getName() {
            return name;
        }
    }
}
class PollerTests {
    // Upper bound passed to every StepVerifier.verify so a hung poller fails fast instead of hanging the build.
    private static final Duration STEPVERIFIER_TIMEOUT = Duration.ofSeconds(30);

    // PollerFlux constructor validation: the poll interval must be a positive, non-null duration.
    @Test
    public void asyncPollerConstructorPollIntervalZero() {
        assertThrows(IllegalArgumentException.class, () -> new PollerFlux<>(Duration.ZERO, ignored -> null,
            ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
    }

    @Test
    public void asyncPollerConstructorPollIntervalNegative() {
        assertThrows(IllegalArgumentException.class, () -> new PollerFlux<>(Duration.ofSeconds(-1), ignored -> null,
            ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
    }

    @Test
    public void asyncPollerConstructorPollIntervalNull() {
        assertThrows(NullPointerException.class, () -> new PollerFlux<>(null, ignored -> null, ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }

    // PollerFlux constructor validation: every operation function is required (non-null).
    @Test
    public void asyncPollerConstructorActivationOperationNull() {
        assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), null, ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }

    @Test
    public void asyncPollerConstructorPollOperationNull() {
        assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null, null,
            (ignored1, ignored2) -> null, ignored -> null));
    }

    @Test
    public void asyncPollerConstructorCancelOperationNull() {
        assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null,
            ignored -> null, null, ignored -> null));
    }

    @Test
    public void asyncPollerConstructorFetchResultOperationNull() {
        assertThrows(NullPointerException.class, () -> new PollerFlux<>(Duration.ofSeconds(1), ignored -> null,
            ignored -> null, (ignored1, ignored2) -> null, null));
    }

    // Verifies subscribers observe custom ("OTHER_*") operation statuses in exactly the order polled.
    @Test
    public void subscribeToSpecificOtherOperationStatusTest() {
        final Duration retryAfter = Duration.ofMillis(10);
        PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
        PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
        PollResponse<Response> response2
            = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_1", false), new Response("2"),
                retryAfter);
        PollResponse<Response> response3
            = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_2", false), new Response("3"),
                retryAfter);
        PollResponse<Response> response4
            = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("4"), retryAfter);

        Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.empty();

        // Scripted poll operation: replays the five responses above in order.
        int[] callCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            switch (callCount[0]++) {
                case 0:
                    return Mono.just(response0);

                case 1:
                    return Mono.just(response1);

                case 2:
                    return Mono.just(response2);

                case 3:
                    return Mono.just(response3);

                case 4:
                    return Mono.just(response4);

                default:
                    return Mono.error(new IllegalStateException("Too many requests"));
            }
        };

        PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
            activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        StepVerifier.create(pollerFlux)
            .expectSubscription()
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus())
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus())
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus())
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response3.getStatus())
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response4.getStatus())
            .expectComplete()
            .verify(STEPVERIFIER_TIMEOUT);
    }

    // If activation itself returns a terminal status, the poll operation must never be invoked.
    @Test
    public void noPollingForSynchronouslyCompletedActivationTest() {
        int[] activationCallCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse
            = ignored -> Mono.fromCallable(() -> {
                activationCallCount[0]++;
                return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                    new Response("ActivationDone"));
            });

        // Any poll would be a bug; make it fail loudly.
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> Mono
            .error(new RuntimeException("Polling shouldn't happen for synchronously completed activation."));

        PollerFlux<Response, CertificateOutput> pollerFlux = create(Duration.ofMillis(10),
            activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        StepVerifier.create(pollerFlux)
            .expectSubscription()
            .expectNextMatches(asyncPollResponse -> asyncPollResponse
                .getStatus() == LongRunningOperationStatus.SUCCESSFULLY_COMPLETED)
            .expectComplete()
            .verify(STEPVERIFIER_TIMEOUT);

        assertEquals(1, activationCallCount[0]);
    }

    // Same as above, but driven through the SyncPoller facade.
    @Test
    public void noPollingForSynchronouslyCompletedActivationInSyncPollerTest() {
        int[] activationCallCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse
            = ignored -> Mono.fromCallable(() -> {
                activationCallCount[0]++;
                return new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                    new Response("ActivationDone"));
            });

        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> Mono
            .error(new RuntimeException("Polling shouldn't happen for synchronously completed activation."));

        SyncPoller<Response, CertificateOutput> syncPoller = create(Duration.ofMillis(10),
            activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null,
            ignored -> (Mono<CertificateOutput>) null)
            .getSyncPoller();

        try {
            PollResponse<Response> response = syncPoller.waitForCompletion(Duration.ofSeconds(1));
            assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, response.getStatus());
            assertEquals(1, activationCallCount[0]);
        } catch (Exception e) {
            fail("SyncPoller did not complete on activation", e);
        }
    }

    // If activation returns a non-terminal status, polling must proceed until a terminal status arrives.
    @Test
    public void ensurePollingForInProgressActivationResponseTest() {
        final Duration retryAfter = Duration.ofMillis(10);
        int[] activationCallCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> activationOperationWithResponse
            = ignored -> Mono.fromCallable(() -> {
                activationCallCount[0]++;
                return new PollResponse<>(IN_PROGRESS, new Response("ActivationDone"));
            });

        PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
        PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
        PollResponse<Response> response2
            = new PollResponse<>(LongRunningOperationStatus.fromString("OTHER_1", false), new Response("2"),
                retryAfter);
        PollResponse<Response> response3
            = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("3"), retryAfter);

        int[] callCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            switch (callCount[0]++) {
                case 0:
                    return Mono.just(response0);

                case 1:
                    return Mono.just(response1);

                case 2:
                    return Mono.just(response2);

                case 3:
                    return Mono.just(response3);

                default:
                    return Mono.error(new IllegalStateException("Too many requests"));
            }
        };

        PollerFlux<Response, CertificateOutput> pollerFlux = create(Duration.ofMillis(10),
            activationOperationWithResponse, pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        StepVerifier.create(pollerFlux)
            .expectSubscription()
            .assertNext(asyncPollResponse -> assertEquals(response0.getStatus(), asyncPollResponse.getStatus()))
            .assertNext(asyncPollResponse -> assertEquals(response1.getStatus(), asyncPollResponse.getStatus()))
            .assertNext(asyncPollResponse -> assertEquals(response2.getStatus(), asyncPollResponse.getStatus()))
            .assertNext(asyncPollResponse -> assertEquals(response3.getStatus(), asyncPollResponse.getStatus()))
            .expectComplete()
            .verify(STEPVERIFIER_TIMEOUT);

        assertEquals(1, activationCallCount[0]);
    }

    // Activation must run once even when the flux is subscribed to multiple times.
    @Test
    public void subscribeToActivationOnlyOnceTest() {
        final Duration retryAfter = Duration.ofMillis(10);
        PollResponse<Response> response0 =
            new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
        PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
        PollResponse<Response> response2
            = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), retryAfter);

        int[] activationCallCount = new int[1];
        Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.fromCallable(() -> {
            activationCallCount[0]++;
            return new Response("ActivationDone");
        });

        int[] pollCallCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            switch (pollCallCount[0]++) {
                case 0:
                    return Mono.just(response0);

                case 1:
                    return Mono.just(response1);

                case 2:
                    return Mono.just(response2);

                default:
                    return Mono.error(new IllegalStateException("Too many requests"));
            }
        };

        PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
            activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        StepVerifier.create(pollerFlux)
            .expectSubscription()
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus())
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus())
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus())
            .expectComplete()
            .verify(STEPVERIFIER_TIMEOUT);

        // Second subscription re-polls from scratch but must NOT re-run activation.
        pollCallCount[0] = 0;
        StepVerifier.create(pollerFlux)
            .expectSubscription()
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response0.getStatus())
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response1.getStatus())
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == response2.getStatus())
            .expectComplete()
            .verify(STEPVERIFIER_TIMEOUT);

        assertEquals(1, activationCallCount[0]);
    }

    // cancelOperation is invocable mid-stream and receives the polling context plus the latest response.
    @Test
    public void cancellationCanBeCalledFromOperatorChainTest() {
        final Duration retryAfter = Duration.ofMillis(10);
        PollResponse<Response> response0 = new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter);
        PollResponse<Response> response1 = new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter);
        PollResponse<Response> response2
            = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), retryAfter);

        final Response activationResponse = new Response("Foo");
        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        int[] callCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            switch (callCount[0]++) {
                case 0:
                    return Mono.just(response0);

                case 1:
                    return Mono.just(response1);

                case 2:
                    return Mono.just(response2);

                default:
                    return Mono.error(new IllegalStateException("Too many requests"));
            }
        };

        // Records the arguments the poller hands to the cancel operation for later verification.
        final List<Object> cancelParameters = new ArrayList<>();
        BiFunction<PollingContext<Response>, PollResponse<Response>, Mono<Response>> cancelOperation
            = (pollingContext, pollResponse) -> {
                Collections.addAll(cancelParameters, pollingContext, pollResponse);
                return Mono.just(new Response("OperationCancelled"));
            };

        PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
            activationOperation, pollOperation, cancelOperation, ignored -> null);

        // Cancel after the second emission, from inside the operator chain.
        AtomicReference<AsyncPollResponse<Response, CertificateOutput>> secondAsyncResponse = new AtomicReference<>();
        Response cancelResponse = pollerFlux
            .take(2)
            .last()
            .flatMap((Function<AsyncPollResponse<Response, CertificateOutput>, Mono<Response>>) asyncPollResponse -> {
                secondAsyncResponse.set(asyncPollResponse);
                return asyncPollResponse.cancelOperation();
            }).block();

        Assertions.assertNotNull(cancelResponse);
        Assertions.assertTrue(cancelResponse.getResponse().equalsIgnoreCase("OperationCancelled"));
        Assertions.assertNotNull(secondAsyncResponse.get());
        Assertions.assertEquals("1", secondAsyncResponse.get().getValue().getResponse());
        assertEquals(2, cancelParameters.size());
        assertEquals(activationResponse, ((PollingContext<?>) cancelParameters.get(0)).getActivationResponse()
            .getValue());
        assertEquals(activationResponse, ((PollResponse<?>) cancelParameters.get(1)).getValue());
    }

    // getFinalResult is invocable from the operator chain once a terminal status is observed.
    @Test
    public void getResultCanBeCalledFromOperatorChainTest() {
        final Duration retryAfter = Duration.ofMillis(10);
        PollResponse<Response> response2
            = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"), retryAfter);

        final Response activationResponse = new Response("Foo");
        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        int[] callCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            switch (callCount[0]++) {
                case 0:
                    return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), retryAfter));

                case 1:
                    return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), retryAfter));

                case 2:
                    return Mono.just(response2);

                default:
                    return Mono.error(new IllegalStateException("Too many requests"));
            }
        };

        // Records the polling context handed to the fetch-result operation for later verification.
        final List<PollingContext<Response>> fetchResultParameters = new ArrayList<>();
        Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation = pollingContext -> {
            fetchResultParameters.add(pollingContext);
            return Mono.just(new CertificateOutput("LROFinalResult"));
        };

        PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
            activationOperation, pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);

        AtomicReference<AsyncPollResponse<Response, CertificateOutput>> terminalAsyncResponse
            = new AtomicReference<>();
        CertificateOutput lroResult = pollerFlux
            .takeUntil(apr -> apr.getStatus().isComplete())
            .last()
            .flatMap(
                (Function<AsyncPollResponse<Response, CertificateOutput>, Mono<CertificateOutput>>) asyncPollResponse -> {
                    terminalAsyncResponse.set(asyncPollResponse);
                    return
                    asyncPollResponse.getFinalResult();
                }).block();

        Assertions.assertNotNull(lroResult);
        Assertions.assertTrue(lroResult.getName().equalsIgnoreCase("LROFinalResult"));
        Assertions.assertNotNull(terminalAsyncResponse.get());
        Assertions.assertTrue(terminalAsyncResponse.get().getValue().getResponse().equalsIgnoreCase("2"));
        assertEquals(1, fetchResultParameters.size());
        PollingContext<Response> pollingContext = fetchResultParameters.get(0);
        assertEquals(activationResponse, pollingContext.getActivationResponse().getValue());
        assertEquals(response2, pollingContext.getLatestResponse());
    }

    // An error thrown by the poll operation must surface to subscribers and terminate the flux.
    @Test
    public void verifyExceptionPropagationFromPollingOperation() {
        final Response activationResponse = new Response("Foo");
        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        final AtomicInteger cnt = new AtomicInteger();
        // Third poll errors; the later responses must never be observed.
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> {
            int count = cnt.incrementAndGet();
            if (count <= 2) {
                return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1")));
            } else if (count == 3) {
                return Mono.error(new RuntimeException("Polling operation failed!"));
            } else if (count == 4) {
                return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("2")));
            } else {
                return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3")));
            }
        };

        PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
            activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        StepVerifier.create(pollerFlux)
            .expectSubscription()
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
            .expectErrorMessage("Polling operation failed!")
            .verify(STEPVERIFIER_TIMEOUT);
    }

    // A FAILED status is terminal: the flux completes normally and later responses are never observed.
    @Test
    public void verifyErrorFromPollingOperation() {
        final Response activationResponse = new Response("Foo");
        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        final AtomicInteger cnt = new AtomicInteger();
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> {
            int count = cnt.incrementAndGet();
            if (count <= 2) {
                return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1")));
            } else if (count == 3) {
                return Mono.just(new PollResponse<>(FAILED, new Response("2")));
            } else if (count == 4) {
                return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("3")));
            } else {
                return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("4")));
            }
        };

        PollerFlux<Response, CertificateOutput> pollerFlux = new PollerFlux<>(Duration.ofMillis(10),
            activationOperation, pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        StepVerifier.create(pollerFlux)
            .expectSubscription()
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == IN_PROGRESS)
            .expectNextMatches(asyncPollResponse -> asyncPollResponse.getStatus() == FAILED)
            .expectComplete()
            .verify(STEPVERIFIER_TIMEOUT);
    }

    // SyncOverAsyncPoller constructor validation mirrors PollerFlux: positive non-null interval required.
    @Test
    public void syncPollerConstructorPollIntervalZero() {
        assertThrows(IllegalArgumentException.class, () -> new SyncOverAsyncPoller<>(Duration.ZERO,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }

    @Test
    public void syncPollerConstructorPollIntervalNegative() {
        assertThrows(IllegalArgumentException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(-1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }

    @Test
    public void syncPollerConstructorPollIntervalNull() {
        assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(null,
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, ignored -> null));
    }

    // SyncOverAsyncPoller constructor validation: every operation function is required (non-null).
    @Test
    public void syncConstructorActivationOperationNull() {
        assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1), null,
            ignored -> null, (ignored1, ignored2) -> null, ignored -> null));
    }

    @Test
    public void syncPollerConstructorPollOperationNull() {
        assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), null,
            (ignored1, ignored2) -> null, ignored -> null));
    }

    @Test
    public void syncPollerConstructorCancelOperationNull() {
        assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null, null,
            ignored -> null));
    }

    @Test
    public void syncPollerConstructorFetchResultOperationNull() {
        assertThrows(NullPointerException.class, () -> new SyncOverAsyncPoller<>(Duration.ofSeconds(1),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, null), ignored -> null,
            (ignored1, ignored2) -> null, null));
    }

    // SyncOverAsyncPoller runs activation eagerly, inside the constructor itself.
    @Test
    public void syncPollerShouldCallActivationFromConstructor() {
        Boolean[] activationCalled = new Boolean[1];
        activationCalled[0] = false;
        Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.fromCallable(() -> {
            activationCalled[0] = true;
            return new Response("ActivationDone");
        });

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            ignored -> null, (ignored1, ignored2) -> null, ignored -> null);

        Assertions.assertTrue(activationCalled[0]);
    }

    // Each poll invocation must see the previous poll's response via the polling context.
    @Test
    public void eachPollShouldReceiveLastPollResponse() {
        Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored
            -> Mono.just(new Response("A"));
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = pollingContext -> {
            Assertions.assertNotNull(pollingContext.getActivationResponse());
            Assertions.assertNotNull(pollingContext.getLatestResponse());
            PollResponse<Response> latestResponse = pollingContext.getLatestResponse();
            Assertions.assertNotNull(latestResponse);
            // Appends "A" to the previous response's toString, so each poll's value proves it saw the last one.
            return Mono.just(new PollResponse<>(IN_PROGRESS,
                new Response(latestResponse.getValue().toString() + "A"), Duration.ofMillis(10)));
        };

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        PollResponse<Response> pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: AA"));

        pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: Response: AAA"));

        pollResponse = poller.poll();
        Assertions.assertNotNull(pollResponse);
        Assertions.assertNotNull(pollResponse.getValue().getResponse());
        Assertions.assertTrue(pollResponse.getValue()
            .getResponse()
            .equalsIgnoreCase("Response: Response: Response: AAAA"));
    }

    // waitForCompletion must return the terminal poll response.
    @Test
    public void waitForCompletionShouldReturnTerminalPollResponse() {
        PollResponse<Response> response2
            = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"),
                Duration.ofMillis(10));

        final Response activationResponse = new Response("Activated");
        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        int[] pollCallCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            switch (pollCallCount[0]++) {
                case 0:
                    return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));

                case 1:
                    return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));

                case 2:
                    return Mono.just(response2);

                default:
                    return Mono.error(new IllegalStateException("Too many requests"));
            }
        };

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            pollOperation, (ignored1, ignored2) -> null, ignored -> null);

        PollResponse<Response> pollResponse = poller.waitForCompletion();
        Assertions.assertNotNull(pollResponse.getValue());
        assertEquals(response2.getValue().getResponse(), pollResponse.getValue().getResponse());
        assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus());
    }

    // getFinalResult must poll to completion, then fetch and return the final result exactly once.
    @Test
    public void getResultShouldPollUntilCompletionAndFetchResult() {
        final Response activationResponse = new Response("Activated");
        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        int[] invocationCount = new int[1];
        invocationCount[0] = -1;
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            invocationCount[0]++;
            switch (invocationCount[0]) {
                case 0:
                    return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));

                case 1:
                    return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));

                case 2:
                    return Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED,
                        new Response("2"), Duration.ofMillis(10)));

                default:
                    return Mono.error(new RuntimeException("Poll should not be called after terminal response"));
            }
        };

        Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation
            = ignored -> Mono.just(new CertificateOutput("cert1"));

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);

        CertificateOutput certificateOutput = poller.getFinalResult();
        Assertions.assertNotNull(certificateOutput);
        assertEquals("cert1", certificateOutput.getName());
        assertEquals(2, invocationCount[0]);
    }

    // Once a poller is complete, getFinalResult must not trigger any further polling.
    @Test
    public void getResultShouldNotPollOnCompletedPoller() {
        PollResponse<Response> response2
            = new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, new Response("2"),
                Duration.ofMillis(10));

        final Response activationResponse = new Response("Activated");
        Function<PollingContext<Response>, Mono<Response>> activationOperation
            = ignored -> Mono.just(activationResponse);

        Function<PollingContext<Response>, Mono<CertificateOutput>> fetchResultOperation
            = ignored -> Mono.just(new CertificateOutput("cert1"));

        int[] pollCallCount = new int[1];
        Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> {
            switch (pollCallCount[0]++) {
                case 0:
                    return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10)));

                case 1:
                    return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10)));

                case 2:
                    return Mono.just(response2);

                default:
                    return Mono.error(new IllegalStateException("Too many requests"));
            }
        };

        SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10),
            cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED,
                activationOperation.apply(cxt).block()),
            pollOperation, (ignored1, ignored2) -> null, fetchResultOperation);

        PollResponse<Response> pollResponse = poller.waitForCompletion();
        Assertions.assertNotNull(pollResponse.getValue());
        assertEquals(response2.getValue().getResponse(),
pollResponse.getValue().getResponse()); assertEquals(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollResponse.getStatus()); CertificateOutput certificateOutput = poller.getFinalResult(); Assertions.assertNotNull(certificateOutput); assertEquals("cert1", certificateOutput.getName()); } @Test public void waitUntilShouldPollAfterMatchingStatus() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); LongRunningOperationStatus matchStatus = LongRunningOperationStatus.fromString("OTHER_1", false); int[] invocationCount = new int[1]; invocationCount[0] = -1; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { invocationCount[0]++; switch (invocationCount[0]) { case 0: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); case 1: return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"), Duration.ofMillis(10))); case 2: return Mono.just(new PollResponse<>(matchStatus, new Response("1"), Duration.ofMillis(10))); default: return Mono.error(new RuntimeException("Poll should not be called after matching response")); } }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); PollResponse<Response> pollResponse = poller.waitUntil(matchStatus); assertEquals(matchStatus, pollResponse.getStatus()); assertEquals(2, invocationCount[0]); } @Test public void verifyExceptionPropagationFromPollingOperationSyncPoller() { final Response activationResponse = new Response("Foo"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); final AtomicInteger cnt = new AtomicInteger(); 
Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = (pollingContext) -> { int count = cnt.incrementAndGet(); if (count <= 2) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("1"))); } else if (count == 3) { return Mono.error(new RuntimeException("Polling operation failed!")); } else if (count == 4) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("2"))); } else { return Mono.just(new PollResponse<>(SUCCESSFULLY_COMPLETED, new Response("3"))); } }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); RuntimeException exception = assertThrows(RuntimeException.class, poller::getFinalResult); assertEquals(exception.getMessage(), "Polling operation failed!"); } @Test public void testPollerFluxError() throws InterruptedException { IllegalArgumentException expectedException = new IllegalArgumentException(); PollerFlux<String, String> pollerFlux = error(expectedException); CountDownLatch countDownLatch = new CountDownLatch(1); pollerFlux.subscribe( response -> Assertions.fail("Did not expect a response"), ex -> { countDownLatch.countDown(); Assertions.assertSame(expectedException, ex); }, () -> Assertions.fail("Did not expect the flux to complete") ); boolean completed = countDownLatch.await(1, TimeUnit.SECONDS); Assertions.assertTrue(completed); } @Test public void testSyncPollerError() { PollerFlux<String, String> pollerFlux = error(new IllegalArgumentException()); Assertions.assertThrows(IllegalArgumentException.class, pollerFlux::getSyncPoller); } @Test public void testUpdatePollingIntervalWithoutVirtualTimer() { PollerFlux<String, String> pollerFlux = PollerFlux.create(Duration.ofMillis(10), context -> Mono.just(new PollResponse<>(IN_PROGRESS, "Activation")), context -> Mono.just(new 
PollResponse<>(IN_PROGRESS, "PollOperation")), (context, response) -> Mono.just("Cancel"), context -> Mono.just("FinalResult")); pollerFlux.setPollInterval(Duration.ofMillis(200)); StepVerifier.create(pollerFlux.take(5)) .thenAwait(Duration.ofSeconds(1)) .expectNextCount(5) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); } @Test public void testUpdatePollingInterval() { PollerFlux<String, String> pollerFlux = PollerFlux.create(Duration.ofMillis(10), context -> Mono.just(new PollResponse<>(IN_PROGRESS, "Activation")), context -> Mono.just(new PollResponse<>(IN_PROGRESS, "PollOperation")), (context, response) -> Mono.just("Cancel"), context -> Mono.just("FinalResult")); StepVerifier.create(pollerFlux.take(5)) .thenAwait(Duration.ofMillis(55)) .expectNextCount(5) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); pollerFlux.setPollInterval(Duration.ofMillis(50)); StepVerifier.create(pollerFlux.take(5)) .thenAwait(Duration.ofMillis(255)) .expectNextCount(5) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); pollerFlux.setPollInterval(Duration.ofMillis(195)); StepVerifier.create(pollerFlux.take(5)) .thenAwait(Duration.ofSeconds(1)) .expectNextCount(5) .expectComplete() .verify(STEPVERIFIER_TIMEOUT); } /** * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer * than the timeout period. 
*/ @Test public void waitForCompletionSinglePollTimesOut() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> Mono.delay(Duration.ofSeconds(2)) .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); RuntimeException exception = assertThrows(RuntimeException.class, () -> poller.waitForCompletion(Duration.ofMillis(100))); assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception)); } /** * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation * doesn't complete within the timeout period. 
*/ @Test public void waitForCompletionOperationTimesOut() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); int[] invocationCount = new int[1]; invocationCount[0] = -1; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { invocationCount[0]++; if (invocationCount[0] == 0) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); } else { return Mono.delay(Duration.ofSeconds(2)) .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); } }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); RuntimeException exception = assertThrows(RuntimeException.class, () -> poller.waitForCompletion(Duration.ofMillis(100))); assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception)); } /** * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer * than the timeout period. 
*/ @Test public void waitUntilSinglePollTimesOut() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> Mono.delay(Duration.ofSeconds(2)) .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); PollResponse<Response> pollResponse = poller.waitUntil(Duration.ofMillis(100), SUCCESSFULLY_COMPLETED); assertEquals(activationResponse.getResponse(), pollResponse.getValue().getResponse()); } /** * Tests that the last received PollResponse is used when waitUtil times out. */ @Test /** * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if a single poll takes longer * than the timeout period. 
*/ @Test public void getFinalResultSinglePollTimesOut() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> Mono.delay(Duration.ofSeconds(2)) .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); RuntimeException exception = assertThrows(RuntimeException.class, () -> poller.getFinalResult(Duration.ofMillis(100))); assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception)); } /** * Tests that a {@link RuntimeException} wrapping a {@link TimeoutException} is thrown if the polling operation * doesn't complete within the timeout period. 
*/ @Test public void getFinalResultOperationTimesOut() { final Response activationResponse = new Response("Activated"); Function<PollingContext<Response>, Mono<Response>> activationOperation = ignored -> Mono.just(activationResponse); int[] invocationCount = new int[1]; invocationCount[0] = -1; Function<PollingContext<Response>, Mono<PollResponse<Response>>> pollOperation = ignored -> { invocationCount[0]++; if (invocationCount[0] == 0) { return Mono.just(new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); } else { return Mono.delay(Duration.ofSeconds(2)) .map(ignored2 -> new PollResponse<>(IN_PROGRESS, new Response("0"), Duration.ofMillis(10))); } }; SyncPoller<Response, CertificateOutput> poller = new SyncOverAsyncPoller<>(Duration.ofMillis(10), cxt -> new PollResponse<>(LongRunningOperationStatus.NOT_STARTED, activationOperation.apply(cxt).block()), pollOperation, (ignored1, ignored2) -> null, ignored -> null); RuntimeException exception = assertThrows(RuntimeException.class, () -> poller.getFinalResult(Duration.ofMillis(100))); assertInstanceOf(TimeoutException.class, exception.getCause(), () -> printException(exception)); } private static String printException(Throwable throwable) { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); throwable.printStackTrace(pw); return sw.toString(); } public static class Response { private final String response; public Response(String response) { this.response = response; } public String getResponse() { return response; } @Override public String toString() { return "Response: " + response; } } public static class CertificateOutput { String name; public CertificateOutput(String certName) { name = certName; } public String getName() { return name; } } }